diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..34d92e47e --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,8 @@ +--- +# https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem +version: 2 +updates: + - package-ecosystem: gitsubmodule + directory: / + schedule: + interval: daily diff --git a/.github/workflows/build-pdf.yml b/.github/workflows/build-pdf.yml index d5ac7d630..168bcfcef 100644 --- a/.github/workflows/build-pdf.yml +++ b/.github/workflows/build-pdf.yml @@ -4,7 +4,6 @@ on: push: branches: - main - pull_request: release: types: - created @@ -13,7 +12,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: submodules: recursive - name: Install packages @@ -26,7 +25,7 @@ jobs: run: make -C doc build - name: Upload artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 with: name: v-intrinsic-spec.pdf path: doc/v-intrinsic-spec.pdf @@ -37,7 +36,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Download artifact - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v4 with: name: v-intrinsic-spec.pdf path: ./doc/ @@ -74,7 +73,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Download artifact - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v4 with: name: v-intrinsic-spec.pdf path: ./doc diff --git a/.github/workflows/clang-compilation.yml b/.github/workflows/clang-compilation.yml index ad6b0efa6..dae68e44f 100644 --- a/.github/workflows/clang-compilation.yml +++ b/.github/workflows/clang-compilation.yml @@ -4,20 +4,32 @@ on: [push] jobs: build: runs-on: ubuntu-latest + strategy: + matrix: + llvm-version: ["main", "latest-rel"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.11' - name: Prerequisites run: | - sudo apt-get install autoconf automake autotools-dev curl python3 python3-pip libmpc-dev libmpfr-dev libgmp-dev gawk build-essential bison flex texinfo gperf libtool patchutils bc zlib1g-dev libexpat-dev ninja-build git cmake libglib2.0-dev dejagnu + sudo apt-get install autoconf automake autotools-dev curl libmpc-dev libmpfr-dev libgmp-dev gawk build-essential bison flex texinfo gperf libtool patchutils bc zlib1g-dev libexpat-dev ninja-build git cmake libglib2.0-dev dejagnu - name: Install dependencies run: | - python -m pip install --upgrade pip - pip install junitparser + pip install --user junitparser - name: Download LLVM run: | cd .. 
rm -rf llvm-project - git clone https://github.com/llvm/llvm-project + git clone https://github.com/llvm/llvm-project -j `nproc` + - name: Checkout LLVM version + run: | + cd ../llvm-project + if [ "${{ matrix.llvm-version }}" = "latest-rel" ]; then + latestTag=$(git describe --tags `git rev-list --tags --max-count=1`) + git checkout $latestTag + fi - name: Build LLVM with Ninja run: | cd ../llvm-project @@ -32,18 +44,18 @@ jobs: -DLLVM_DEFAULT_TARGET_TRIPLE="riscv64-unknown-linux-gnu" \ -DLLVM_ENABLE_PROJECTS="clang;lld" \ ../llvm - ninja -j 4 + ninja -j `nproc` echo $(pwd) ls bin - name: Run compilation test, non-overloaded intrinsics (default (TAMA) policy) run: | - make -C rvv-intrinsic-generator run-api-testing COMPILER=$(pwd)/../llvm-project/build/bin/clang EXTRA_CFLAGS="-target riscv64" + make -C rvv-intrinsic-generator run-api-testing run-bfloat16-api-testing run-vector-crypto-api-testing COMPILER=$(pwd)/../llvm-project/build/bin/clang EXTRA_CFLAGS="-target riscv64" - name: Run compilation test, overloaded intrinsics (default (TAMA) policy) run: | - make -C rvv-intrinsic-generator run-overloaded-api-testing COMPILER=$(pwd)/../llvm-project/build/bin/clang EXTRA_CFLAGS="-target riscv64" + make -C rvv-intrinsic-generator run-overloaded-api-testing run-bfloat16-overloaded-api-testing run-vector-crypto-overloaded-api-testing COMPILER=$(pwd)/../llvm-project/build/bin/clang EXTRA_CFLAGS="-target riscv64" - name: Run compilation test, non-overloaded intrinsics (non-default policy) run: | - make -C rvv-intrinsic-generator run-policy-api-testing COMPILER=$(pwd)/../llvm-project/build/bin/clang EXTRA_CFLAGS="-target riscv64" + make -C rvv-intrinsic-generator run-policy-api-testing run-bfloat16-policy-api-testing run-vector-crypto-policy-api-testing COMPILER=$(pwd)/../llvm-project/build/bin/clang EXTRA_CFLAGS="-target riscv64" - name: Run compilation test, overloaded intrinsics (non-default policy) run: | - make -C rvv-intrinsic-generator run-policy-overloaded-api-testing COMPILER=$(pwd)/../llvm-project/build/bin/clang EXTRA_CFLAGS="-target riscv64" + make -C rvv-intrinsic-generator run-policy-overloaded-api-testing run-bfloat16-policy-overloaded-api-testing run-vector-crypto-policy-overloaded-api-testing COMPILER=$(pwd)/../llvm-project/build/bin/clang EXTRA_CFLAGS="-target riscv64" diff --git a/.github/workflows/gcc-compilation.yml b/.github/workflows/gcc-compilation.yml index 211591c24..028c80633 100644 --- a/.github/workflows/gcc-compilation.yml +++ b/.github/workflows/gcc-compilation.yml @@ -10,7 +10,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Prerequisites run: sudo apt-get install autoconf automake autotools-dev curl python3 python3-pip diff --git a/.github/workflows/generator.yml b/.github/workflows/generator.yml index 8a64c3121..c1c2b9aff 100644 --- a/.github/workflows/generator.yml +++ b/.github/workflows/generator.yml @@ -1,17 +1,23 @@ name: rvv-intrinsic-generator -on: [push] +on: + push: + branches: + - main + pull_request: + branches: + - main jobs: build: runs-on: ubuntu-latest strategy: matrix: - python-version: ["3.7", "3.8", "3.9", "3.10"] + python-version: ["3.9", "3.10", "3.11"] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Prerequisites diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 
000000000..1d98c72b6 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,58 @@ +# Contribution Guidelines + +As an open-source project, we appreciate and encourage community members to submit patches directly to the project. To maintain a well-organized development environment, we have established standards and methods for submitting changes. This document outlines the process for submitting patches to the project, ensuring that your contribution is swiftly incorporated into the codebase. + +# Licensing + +Licensing is crucial for open-source projects, as it guarantees that the software remains available under the conditions specified by the author. + +This project employs the Creative Commons Attribution 4.0 International license, which can be found in the LICENSE file within the project's repository. + +Licensing defines the rights granted to you as an author by the copyright holder. It is essential for contributors to fully understand and accept these licensing rights. In some cases, the copyright holder may not be the contributor, such as when the contributor is working on behalf of a company. + +# Developer Certificate of Origin (DCO) +To uphold licensing criteria and demonstrate good faith, this project mandates adherence to the Developer Certificate of Origin (DCO) process. + +The DCO is an attestation appended to every contribution from each author. In the commit message of the contribution (explained in greater detail later in this document), the author adds a Signed-off-by statement, thereby accepting the DCO. + +When an author submits a patch, they affirm that they possess the right to submit the patch under the designated license. The DCO agreement is displayed below and at https://developercertificate.org. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b), or (c), and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. + +# DCO Sign-Off Methods +The DCO necessitates the inclusion of a sign-off message in the following format for each commit within the pull request: + +Signed-off-by: Stephano Cetola + +Please use your real name in the sign-off message. + +You can manually add the DCO text to your commit body or include either -s or --signoff in your standard Git commit commands. If you forget to incorporate the sign-off, you can also amend a previous commit with the sign-off by executing git commit --amend -s. If you have already pushed your changes to GitHub, you will need to force push your branch afterward using git push -f. 
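+ +For example, a minimal sign-off workflow might look like the following sketch (the commit message and branch name here are placeholders, not project conventions): + +```shell +# Create a commit with an automatic Signed-off-by trailer +git commit -s -m "Describe your change" + +# Forgot the sign-off? Amend the previous commit, then force push +git commit --amend -s +git push -f origin <your-branch> +```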
+ +Note: + +Ensure that the name and email address associated with your GitHub account match the name and email address in the Signed-off-by line of your commit message. diff --git a/README.md b/README.md index 09145f67b..c0e80dbbe 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,8 @@ Working draft for the RISC-V vector specification are under [doc/](doc/), intrin Please check out the latest intrinsics specification under [Releases](https://github.com/riscv-non-isa/rvv-intrinsic-doc/releases). +[Clang 19](https://github.com/llvm/llvm-project/blob/llvmorg-19.1.0/llvm/docs/RISCV/RISCVVectorExtension.rst) and [GCC 14](https://github.com/gcc-mirror/gcc/tree/releases/gcc-14) support the [v1.0](https://github.com/riscv-non-isa/rvv-intrinsic-doc/tree/v1.0.x) version. + [Clang 17](https://releases.llvm.org/17.0.1/tools/clang/docs/ReleaseNotes.html) and [GCC trunk](https://github.com/gcc-mirror/gcc/tree/master) supports the [v0.12](https://github.com/riscv-non-isa/rvv-intrinsic-doc/releases/tag/v0.12.0) version, no more incompatibility will be introduced. [Clang 16](https://releases.llvm.org/16.0.0/tools/clang/docs/ReleaseNotes.html) and diff --git a/auto-generated/api-testing/vcpop.c b/auto-generated/api-testing/vcpop.c index 06e64c836..11f2e55ac 100644 --- a/auto-generated/api-testing/vcpop.c +++ b/auto-generated/api-testing/vcpop.c @@ -1,58 +1,58 @@ #include <riscv_vector.h> #include <stdint.h> -unsigned int test_vcpop_m_b1(vbool1_t vs2, size_t vl) { +unsigned long test_vcpop_m_b1(vbool1_t vs2, size_t vl) { return __riscv_vcpop_m_b1(vs2, vl); } -unsigned int test_vcpop_m_b2(vbool2_t vs2, size_t vl) { +unsigned long test_vcpop_m_b2(vbool2_t vs2, size_t vl) { return __riscv_vcpop_m_b2(vs2, vl); } -unsigned int test_vcpop_m_b4(vbool4_t vs2, size_t vl) { +unsigned long test_vcpop_m_b4(vbool4_t vs2, size_t vl) { return __riscv_vcpop_m_b4(vs2, vl); } -unsigned int test_vcpop_m_b8(vbool8_t vs2, size_t vl) { +unsigned long test_vcpop_m_b8(vbool8_t vs2, size_t vl) { return __riscv_vcpop_m_b8(vs2, vl); } -unsigned int test_vcpop_m_b16(vbool16_t vs2, size_t vl) { +unsigned long test_vcpop_m_b16(vbool16_t vs2, size_t vl) { return __riscv_vcpop_m_b16(vs2, vl); } -unsigned int test_vcpop_m_b32(vbool32_t vs2, size_t vl) { +unsigned long test_vcpop_m_b32(vbool32_t vs2, size_t vl) { return __riscv_vcpop_m_b32(vs2, vl); } -unsigned int test_vcpop_m_b64(vbool64_t vs2, size_t vl) { +unsigned long test_vcpop_m_b64(vbool64_t vs2, size_t vl) { return __riscv_vcpop_m_b64(vs2, vl); } -unsigned int test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { +unsigned long test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { return __riscv_vcpop_m_b1_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { +unsigned long test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { return __riscv_vcpop_m_b2_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { +unsigned long test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { return __riscv_vcpop_m_b4_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { +unsigned long test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { return __riscv_vcpop_m_b8_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { +unsigned long test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { return __riscv_vcpop_m_b16_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { +unsigned long test_vcpop_m_b32_m(vbool32_t vm,
vbool32_t vs2, size_t vl) { return __riscv_vcpop_m_b32_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { +unsigned long test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { return __riscv_vcpop_m_b64_m(vm, vs2, vl); } diff --git a/auto-generated/api-testing/vfirst.c b/auto-generated/api-testing/vfirst.c index 508967af6..96e72970f 100644 --- a/auto-generated/api-testing/vfirst.c +++ b/auto-generated/api-testing/vfirst.c @@ -1,58 +1,58 @@ #include <riscv_vector.h> #include <stdint.h> -int test_vfirst_m_b1(vbool1_t vs2, size_t vl) { +long test_vfirst_m_b1(vbool1_t vs2, size_t vl) { return __riscv_vfirst_m_b1(vs2, vl); } -int test_vfirst_m_b2(vbool2_t vs2, size_t vl) { +long test_vfirst_m_b2(vbool2_t vs2, size_t vl) { return __riscv_vfirst_m_b2(vs2, vl); } -int test_vfirst_m_b4(vbool4_t vs2, size_t vl) { +long test_vfirst_m_b4(vbool4_t vs2, size_t vl) { return __riscv_vfirst_m_b4(vs2, vl); } -int test_vfirst_m_b8(vbool8_t vs2, size_t vl) { +long test_vfirst_m_b8(vbool8_t vs2, size_t vl) { return __riscv_vfirst_m_b8(vs2, vl); } -int test_vfirst_m_b16(vbool16_t vs2, size_t vl) { +long test_vfirst_m_b16(vbool16_t vs2, size_t vl) { return __riscv_vfirst_m_b16(vs2, vl); } -int test_vfirst_m_b32(vbool32_t vs2, size_t vl) { +long test_vfirst_m_b32(vbool32_t vs2, size_t vl) { return __riscv_vfirst_m_b32(vs2, vl); } -int test_vfirst_m_b64(vbool64_t vs2, size_t vl) { +long test_vfirst_m_b64(vbool64_t vs2, size_t vl) { return __riscv_vfirst_m_b64(vs2, vl); } -int test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { +long test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { return __riscv_vfirst_m_b1_m(vm, vs2, vl); } -int test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { +long test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { return __riscv_vfirst_m_b2_m(vm, vs2, vl); } -int test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { +long test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { return __riscv_vfirst_m_b4_m(vm, vs2, vl); } -int test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { +long test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { return __riscv_vfirst_m_b8_m(vm, vs2, vl); } -int test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { +long test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { return __riscv_vfirst_m_b16_m(vm, vs2, vl); } -int test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { +long test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { return __riscv_vfirst_m_b32_m(vm, vs2, vl); } -int test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { +long test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { return __riscv_vfirst_m_b64_m(vm, vs2, vl); } diff --git a/auto-generated/bfloat16/api-testing/vcreate.c b/auto-generated/bfloat16/api-testing/vcreate.c new file mode 100644 index 000000000..6f3316ad1 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vcreate.c @@ -0,0 +1,177 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16m2_t test_vcreate_v_bf16m1_bf16m2(vbfloat16m1_t v0, vbfloat16m1_t v1) { + return __riscv_vcreate_v_bf16m1_bf16m2(v0, v1); +} + +vbfloat16m4_t test_vcreate_v_bf16m1_bf16m4(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2, vbfloat16m1_t v3) { + return __riscv_vcreate_v_bf16m1_bf16m4(v0, v1, v2, v3); +} + +vbfloat16m8_t test_vcreate_v_bf16m1_bf16m8(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2, vbfloat16m1_t v3, + vbfloat16m1_t v4, vbfloat16m1_t v5, + vbfloat16m1_t v6, vbfloat16m1_t v7) { + return __riscv_vcreate_v_bf16m1_bf16m8(v0,
v1, v2, v3, v4, v5, v6, v7); +} + +vbfloat16m4_t test_vcreate_v_bf16m2_bf16m4(vbfloat16m2_t v0, vbfloat16m2_t v1) { + return __riscv_vcreate_v_bf16m2_bf16m4(v0, v1); +} + +vbfloat16m8_t test_vcreate_v_bf16m2_bf16m8(vbfloat16m2_t v0, vbfloat16m2_t v1, + vbfloat16m2_t v2, vbfloat16m2_t v3) { + return __riscv_vcreate_v_bf16m2_bf16m8(v0, v1, v2, v3); +} + +vbfloat16m8_t test_vcreate_v_bf16m4_bf16m8(vbfloat16m4_t v0, vbfloat16m4_t v1) { + return __riscv_vcreate_v_bf16m4_bf16m8(v0, v1); +} + +vbfloat16mf4x2_t test_vcreate_v_bf16mf4x2(vbfloat16mf4_t v0, + vbfloat16mf4_t v1) { + return __riscv_vcreate_v_bf16mf4x2(v0, v1); +} + +vbfloat16mf4x3_t test_vcreate_v_bf16mf4x3(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2) { + return __riscv_vcreate_v_bf16mf4x3(v0, v1, v2); +} + +vbfloat16mf4x4_t test_vcreate_v_bf16mf4x4(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2, + vbfloat16mf4_t v3) { + return __riscv_vcreate_v_bf16mf4x4(v0, v1, v2, v3); +} + +vbfloat16mf4x5_t test_vcreate_v_bf16mf4x5(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4) { + return __riscv_vcreate_v_bf16mf4x5(v0, v1, v2, v3, v4); +} + +vbfloat16mf4x6_t test_vcreate_v_bf16mf4x6(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4, + vbfloat16mf4_t v5) { + return __riscv_vcreate_v_bf16mf4x6(v0, v1, v2, v3, v4, v5); +} + +vbfloat16mf4x7_t test_vcreate_v_bf16mf4x7(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4, vbfloat16mf4_t v5, + vbfloat16mf4_t v6) { + return __riscv_vcreate_v_bf16mf4x7(v0, v1, v2, v3, v4, v5, v6); +} + +vbfloat16mf4x8_t test_vcreate_v_bf16mf4x8(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4, vbfloat16mf4_t v5, + vbfloat16mf4_t v6, + vbfloat16mf4_t v7) { + return __riscv_vcreate_v_bf16mf4x8(v0, v1, v2, v3, v4, v5, v6, v7); +} + +vbfloat16mf2x2_t test_vcreate_v_bf16mf2x2(vbfloat16mf2_t v0, + vbfloat16mf2_t v1) { + return __riscv_vcreate_v_bf16mf2x2(v0, v1); +} + +vbfloat16mf2x3_t test_vcreate_v_bf16mf2x3(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2) { + return __riscv_vcreate_v_bf16mf2x3(v0, v1, v2); +} + +vbfloat16mf2x4_t test_vcreate_v_bf16mf2x4(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2, + vbfloat16mf2_t v3) { + return __riscv_vcreate_v_bf16mf2x4(v0, v1, v2, v3); +} + +vbfloat16mf2x5_t test_vcreate_v_bf16mf2x5(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2, vbfloat16mf2_t v3, + vbfloat16mf2_t v4) { + return __riscv_vcreate_v_bf16mf2x5(v0, v1, v2, v3, v4); +} + +vbfloat16mf2x6_t test_vcreate_v_bf16mf2x6(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2, vbfloat16mf2_t v3, + vbfloat16mf2_t v4, + vbfloat16mf2_t v5) { + return __riscv_vcreate_v_bf16mf2x6(v0, v1, v2, v3, v4, v5); +} + +vbfloat16mf2x7_t test_vcreate_v_bf16mf2x7(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2, vbfloat16mf2_t v3, + vbfloat16mf2_t v4, vbfloat16mf2_t v5, + vbfloat16mf2_t v6) { + return __riscv_vcreate_v_bf16mf2x7(v0, v1, v2, v3, v4, v5, v6); +} + +vbfloat16mf2x8_t test_vcreate_v_bf16mf2x8(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2, vbfloat16mf2_t v3, + vbfloat16mf2_t v4, vbfloat16mf2_t v5, + vbfloat16mf2_t v6, + vbfloat16mf2_t v7) { + return __riscv_vcreate_v_bf16mf2x8(v0, v1, v2, v3, v4, v5, v6, v7); +} + +vbfloat16m1x2_t test_vcreate_v_bf16m1x2(vbfloat16m1_t v0, vbfloat16m1_t v1) { + return __riscv_vcreate_v_bf16m1x2(v0, v1); +} + +vbfloat16m1x3_t 
test_vcreate_v_bf16m1x3(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2) { + return __riscv_vcreate_v_bf16m1x3(v0, v1, v2); +} + +vbfloat16m1x4_t test_vcreate_v_bf16m1x4(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2, vbfloat16m1_t v3) { + return __riscv_vcreate_v_bf16m1x4(v0, v1, v2, v3); +} + +vbfloat16m1x5_t test_vcreate_v_bf16m1x5(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2, vbfloat16m1_t v3, + vbfloat16m1_t v4) { + return __riscv_vcreate_v_bf16m1x5(v0, v1, v2, v3, v4); +} + +vbfloat16m1x6_t test_vcreate_v_bf16m1x6(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2, vbfloat16m1_t v3, + vbfloat16m1_t v4, vbfloat16m1_t v5) { + return __riscv_vcreate_v_bf16m1x6(v0, v1, v2, v3, v4, v5); +} + +vbfloat16m1x7_t test_vcreate_v_bf16m1x7(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2, vbfloat16m1_t v3, + vbfloat16m1_t v4, vbfloat16m1_t v5, + vbfloat16m1_t v6) { + return __riscv_vcreate_v_bf16m1x7(v0, v1, v2, v3, v4, v5, v6); +} + +vbfloat16m1x8_t test_vcreate_v_bf16m1x8(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2, vbfloat16m1_t v3, + vbfloat16m1_t v4, vbfloat16m1_t v5, + vbfloat16m1_t v6, vbfloat16m1_t v7) { + return __riscv_vcreate_v_bf16m1x8(v0, v1, v2, v3, v4, v5, v6, v7); +} + +vbfloat16m2x2_t test_vcreate_v_bf16m2x2(vbfloat16m2_t v0, vbfloat16m2_t v1) { + return __riscv_vcreate_v_bf16m2x2(v0, v1); +} + +vbfloat16m2x3_t test_vcreate_v_bf16m2x3(vbfloat16m2_t v0, vbfloat16m2_t v1, + vbfloat16m2_t v2) { + return __riscv_vcreate_v_bf16m2x3(v0, v1, v2); +} + +vbfloat16m2x4_t test_vcreate_v_bf16m2x4(vbfloat16m2_t v0, vbfloat16m2_t v1, + vbfloat16m2_t v2, vbfloat16m2_t v3) { + return __riscv_vcreate_v_bf16m2x4(v0, v1, v2, v3); +} + +vbfloat16m4x2_t test_vcreate_v_bf16m4x2(vbfloat16m4_t v0, vbfloat16m4_t v1) { + return __riscv_vcreate_v_bf16m4x2(v0, v1); +} diff --git a/auto-generated/bfloat16/api-testing/vfncvtbf16.c b/auto-generated/bfloat16/api-testing/vfncvtbf16.c new file mode 100644 index 000000000..ca33a95f1 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vfncvtbf16.c @@ -0,0 +1,92 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4(vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2(vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1(vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2(vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4(vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_m(vm, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_m(vm, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_m(vm, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_m(vm, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_m(vm, vs2, vl); +} + +vbfloat16mf4_t
test_vfncvtbf16_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_rm(vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf4_t +test_vfncvtbf16_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_m(vbool32_t vm, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vfwcvtbf16.c b/auto-generated/bfloat16/api-testing/vfwcvtbf16.c new file mode 100644 index 000000000..762fa909d --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vfwcvtbf16.c @@ -0,0 +1,47 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32mf2(vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m1(vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m2(vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m4(vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m8(vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32mf2_m(vm, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m1_m(vm, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m2_m(vm, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m4_m(vm, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m8_m(vm, vs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vfwmaccbf16.c b/auto-generated/bfloat16/api-testing/vfwmaccbf16.c new file mode 100644 index 000000000..5e48e1b89 --- /dev/null +++
b/auto-generated/bfloat16/api-testing/vfwmaccbf16.c @@ -0,0 +1,233 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2(vfloat32mf2_t vd, vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2(vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2(vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1(vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1(vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2(vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2(vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4(vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4(vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8(vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8(vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_m(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_m(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_m(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_m(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_m(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_m(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_m(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t
test_vfwmaccbf16_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_m(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { 
+ return __riscv_vfwmaccbf16_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} diff --git a/auto-generated/bfloat16/api-testing/vget.c b/auto-generated/bfloat16/api-testing/vget.c new file mode 100644 index 000000000..0eafb6875 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vget.c @@ -0,0 +1,140 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16m1_t test_vget_v_bf16m2_bf16m1(vbfloat16m2_t src, size_t index) { + return __riscv_vget_v_bf16m2_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m4_bf16m1(vbfloat16m4_t src, size_t index) { + return __riscv_vget_v_bf16m4_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m8_bf16m1(vbfloat16m8_t src, size_t index) { + return __riscv_vget_v_bf16m8_bf16m1(src, 0); +} + +vbfloat16m2_t test_vget_v_bf16m4_bf16m2(vbfloat16m4_t src, size_t index) { + return __riscv_vget_v_bf16m4_bf16m2(src, 0); +} + +vbfloat16m2_t test_vget_v_bf16m8_bf16m2(vbfloat16m8_t src, size_t index) { + return __riscv_vget_v_bf16m8_bf16m2(src, 0); +} + +vbfloat16m4_t test_vget_v_bf16m8_bf16m4(vbfloat16m8_t src, size_t index) { + return __riscv_vget_v_bf16m8_bf16m4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x2_bf16mf4(vbfloat16mf4x2_t src, + size_t index) { + return __riscv_vget_v_bf16mf4x2_bf16mf4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x3_bf16mf4(vbfloat16mf4x3_t src, + size_t index) { + return __riscv_vget_v_bf16mf4x3_bf16mf4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x4_bf16mf4(vbfloat16mf4x4_t src, + size_t index) { + return __riscv_vget_v_bf16mf4x4_bf16mf4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x5_bf16mf4(vbfloat16mf4x5_t src, + size_t index) { + return __riscv_vget_v_bf16mf4x5_bf16mf4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x6_bf16mf4(vbfloat16mf4x6_t src, + size_t index) { + return __riscv_vget_v_bf16mf4x6_bf16mf4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x7_bf16mf4(vbfloat16mf4x7_t src, + size_t index) { + return __riscv_vget_v_bf16mf4x7_bf16mf4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x8_bf16mf4(vbfloat16mf4x8_t src, + size_t index) { + return __riscv_vget_v_bf16mf4x8_bf16mf4(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x2_bf16mf2(vbfloat16mf2x2_t src, + size_t index) { + return __riscv_vget_v_bf16mf2x2_bf16mf2(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x3_bf16mf2(vbfloat16mf2x3_t src, + size_t index) { + return __riscv_vget_v_bf16mf2x3_bf16mf2(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x4_bf16mf2(vbfloat16mf2x4_t src, + size_t index) { + return __riscv_vget_v_bf16mf2x4_bf16mf2(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x5_bf16mf2(vbfloat16mf2x5_t src, + size_t index) { + return __riscv_vget_v_bf16mf2x5_bf16mf2(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x6_bf16mf2(vbfloat16mf2x6_t src, + size_t index) { + return __riscv_vget_v_bf16mf2x6_bf16mf2(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x7_bf16mf2(vbfloat16mf2x7_t src, + size_t index) { + return
__riscv_vget_v_bf16mf2x7_bf16mf2(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x8_bf16mf2(vbfloat16mf2x8_t src, + size_t index) { + return __riscv_vget_v_bf16mf2x8_bf16mf2(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x2_bf16m1(vbfloat16m1x2_t src, size_t index) { + return __riscv_vget_v_bf16m1x2_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x3_bf16m1(vbfloat16m1x3_t src, size_t index) { + return __riscv_vget_v_bf16m1x3_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x4_bf16m1(vbfloat16m1x4_t src, size_t index) { + return __riscv_vget_v_bf16m1x4_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x5_bf16m1(vbfloat16m1x5_t src, size_t index) { + return __riscv_vget_v_bf16m1x5_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x6_bf16m1(vbfloat16m1x6_t src, size_t index) { + return __riscv_vget_v_bf16m1x6_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x7_bf16m1(vbfloat16m1x7_t src, size_t index) { + return __riscv_vget_v_bf16m1x7_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x8_bf16m1(vbfloat16m1x8_t src, size_t index) { + return __riscv_vget_v_bf16m1x8_bf16m1(src, 0); +} + +vbfloat16m2_t test_vget_v_bf16m2x2_bf16m2(vbfloat16m2x2_t src, size_t index) { + return __riscv_vget_v_bf16m2x2_bf16m2(src, 0); +} + +vbfloat16m2_t test_vget_v_bf16m2x3_bf16m2(vbfloat16m2x3_t src, size_t index) { + return __riscv_vget_v_bf16m2x3_bf16m2(src, 0); +} + +vbfloat16m2_t test_vget_v_bf16m2x4_bf16m2(vbfloat16m2x4_t src, size_t index) { + return __riscv_vget_v_bf16m2x4_bf16m2(src, 0); +} + +vbfloat16m4_t test_vget_v_bf16m4x2_bf16m4(vbfloat16m4x2_t src, size_t index) { + return __riscv_vget_v_bf16m4x2_bf16m4(src, 0); +} diff --git a/auto-generated/bfloat16/api-testing/vle16.c b/auto-generated/bfloat16/api-testing/vle16.c new file mode 100644 index 000000000..ba320a6cb --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vle16.c @@ -0,0 +1,53 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vle16_v_bf16mf4(const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf4(rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2(const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf2(rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1(const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m1(rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2(const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m2(rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4(const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m4(rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8(const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m8(rs1, vl); +} + +vbfloat16mf4_t test_vle16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16mf4_m(vm, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16mf2_m(vm, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16m1_m(vm, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m2_m(vm, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m4_m(vm, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m8_m(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vle16ff.c b/auto-generated/bfloat16/api-testing/vle16ff.c new file mode
100644 index 000000000..f8d37c7dd --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vle16ff.c @@ -0,0 +1,62 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vle16ff_v_bf16mf4(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf4(rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf2(rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m1(rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m2(rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m4(rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m8(rs1, new_vl, vl); +} + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16mf4_m(vm, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16mf2_m(vm, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16m1_m(vm, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16m2_m(vm, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16m4_m(vm, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16m8_m(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlmul_ext_v.c b/auto-generated/bfloat16/api-testing/vlmul_ext_v.c new file mode 100644 index 000000000..75285f967 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlmul_ext_v.c @@ -0,0 +1,62 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf2_t test_vlmul_ext_v_bf16mf4_bf16mf2(vbfloat16mf4_t value) { + return __riscv_vlmul_ext_v_bf16mf4_bf16mf2(value); +} + +vbfloat16m1_t test_vlmul_ext_v_bf16mf4_bf16m1(vbfloat16mf4_t value) { + return __riscv_vlmul_ext_v_bf16mf4_bf16m1(value); +} + +vbfloat16m2_t test_vlmul_ext_v_bf16mf4_bf16m2(vbfloat16mf4_t value) { + return __riscv_vlmul_ext_v_bf16mf4_bf16m2(value); +} + +vbfloat16m4_t test_vlmul_ext_v_bf16mf4_bf16m4(vbfloat16mf4_t value) { + return __riscv_vlmul_ext_v_bf16mf4_bf16m4(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16mf4_bf16m8(vbfloat16mf4_t value) { + return __riscv_vlmul_ext_v_bf16mf4_bf16m8(value); +} + +vbfloat16m1_t test_vlmul_ext_v_bf16mf2_bf16m1(vbfloat16mf2_t value) { + return __riscv_vlmul_ext_v_bf16mf2_bf16m1(value); +} + +vbfloat16m2_t test_vlmul_ext_v_bf16mf2_bf16m2(vbfloat16mf2_t value) { + return __riscv_vlmul_ext_v_bf16mf2_bf16m2(value); +} + +vbfloat16m4_t test_vlmul_ext_v_bf16mf2_bf16m4(vbfloat16mf2_t value) { + return __riscv_vlmul_ext_v_bf16mf2_bf16m4(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16mf2_bf16m8(vbfloat16mf2_t value) { + return __riscv_vlmul_ext_v_bf16mf2_bf16m8(value); +} + +vbfloat16m2_t test_vlmul_ext_v_bf16m1_bf16m2(vbfloat16m1_t value) { + return __riscv_vlmul_ext_v_bf16m1_bf16m2(value);
+} + +vbfloat16m4_t test_vlmul_ext_v_bf16m1_bf16m4(vbfloat16m1_t value) { + return __riscv_vlmul_ext_v_bf16m1_bf16m4(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16m1_bf16m8(vbfloat16m1_t value) { + return __riscv_vlmul_ext_v_bf16m1_bf16m8(value); +} + +vbfloat16m4_t test_vlmul_ext_v_bf16m2_bf16m4(vbfloat16m2_t value) { + return __riscv_vlmul_ext_v_bf16m2_bf16m4(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16m2_bf16m8(vbfloat16m2_t value) { + return __riscv_vlmul_ext_v_bf16m2_bf16m8(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16m4_bf16m8(vbfloat16m4_t value) { + return __riscv_vlmul_ext_v_bf16m4_bf16m8(value); +} diff --git a/auto-generated/bfloat16/api-testing/vlmul_trunc_v.c b/auto-generated/bfloat16/api-testing/vlmul_trunc_v.c new file mode 100644 index 000000000..97495502a --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlmul_trunc_v.c @@ -0,0 +1,62 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vlmul_trunc_v_bf16mf2_bf16mf4(vbfloat16mf2_t value) { + return __riscv_vlmul_trunc_v_bf16mf2_bf16mf4(value); +} + +vbfloat16mf4_t test_vlmul_trunc_v_bf16m1_bf16mf4(vbfloat16m1_t value) { + return __riscv_vlmul_trunc_v_bf16m1_bf16mf4(value); +} + +vbfloat16mf2_t test_vlmul_trunc_v_bf16m1_bf16mf2(vbfloat16m1_t value) { + return __riscv_vlmul_trunc_v_bf16m1_bf16mf2(value); +} + +vbfloat16mf4_t test_vlmul_trunc_v_bf16m2_bf16mf4(vbfloat16m2_t value) { + return __riscv_vlmul_trunc_v_bf16m2_bf16mf4(value); +} + +vbfloat16mf2_t test_vlmul_trunc_v_bf16m2_bf16mf2(vbfloat16m2_t value) { + return __riscv_vlmul_trunc_v_bf16m2_bf16mf2(value); +} + +vbfloat16m1_t test_vlmul_trunc_v_bf16m2_bf16m1(vbfloat16m2_t value) { + return __riscv_vlmul_trunc_v_bf16m2_bf16m1(value); +} + +vbfloat16mf4_t test_vlmul_trunc_v_bf16m4_bf16mf4(vbfloat16m4_t value) { + return __riscv_vlmul_trunc_v_bf16m4_bf16mf4(value); +} + +vbfloat16mf2_t test_vlmul_trunc_v_bf16m4_bf16mf2(vbfloat16m4_t value) { + return __riscv_vlmul_trunc_v_bf16m4_bf16mf2(value); +} + +vbfloat16m1_t test_vlmul_trunc_v_bf16m4_bf16m1(vbfloat16m4_t value) { + return __riscv_vlmul_trunc_v_bf16m4_bf16m1(value); +} + +vbfloat16m2_t test_vlmul_trunc_v_bf16m4_bf16m2(vbfloat16m4_t value) { + return __riscv_vlmul_trunc_v_bf16m4_bf16m2(value); +} + +vbfloat16mf4_t test_vlmul_trunc_v_bf16m8_bf16mf4(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_v_bf16m8_bf16mf4(value); +} + +vbfloat16mf2_t test_vlmul_trunc_v_bf16m8_bf16mf2(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_v_bf16m8_bf16mf2(value); +} + +vbfloat16m1_t test_vlmul_trunc_v_bf16m8_bf16m1(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_v_bf16m8_bf16m1(value); +} + +vbfloat16m2_t test_vlmul_trunc_v_bf16m8_bf16m2(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_v_bf16m8_bf16m2(value); +} + +vbfloat16m4_t test_vlmul_trunc_v_bf16m8_bf16m4(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_v_bf16m8_bf16m4(value); +} diff --git a/auto-generated/bfloat16/api-testing/vloxei16.c b/auto-generated/bfloat16/api-testing/vloxei16.c new file mode 100644 index 000000000..86b076156 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vloxei16.c @@ -0,0 +1,62 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vloxei16_v_bf16mf4(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf4(rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf2(rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return
__riscv_vloxei16_v_bf16m1(rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m2(rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m4(rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8(const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m8(rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16mf4_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16mf2_m(vm, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16m1_m(vm, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16m2_m(vm, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16m4_m(vm, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16m8_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vloxseg2ei16.c b/auto-generated/bfloat16/api-testing/vloxseg2ei16.c new file mode 100644 index 000000000..1ee5de8c9 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vloxseg2ei16.c @@ -0,0 +1,54 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf4x2(rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf2x2(rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m1x2(rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m2x2(rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m4x2(rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf4x2_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf2x2_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m1x2_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m2x2_m(vm, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m4x2_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vloxseg3ei16.c
b/auto-generated/bfloat16/api-testing/vloxseg3ei16.c new file mode 100644 index 000000000..0f8f21676 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vloxseg3ei16.c @@ -0,0 +1,44 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf4x3(rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf2x3(rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m1x3(rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m2x3(rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf4x3_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf2x3_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m1x3_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m2x3_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vloxseg4ei16.c b/auto-generated/bfloat16/api-testing/vloxseg4ei16.c new file mode 100644 index 000000000..535f74024 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vloxseg4ei16.c @@ -0,0 +1,44 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf4x4(rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf2x4(rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m1x4(rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m2x4(rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf4x4_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf2x4_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m1x4_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m2x4_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vloxseg5ei16.c b/auto-generated/bfloat16/api-testing/vloxseg5ei16.c new file mode 100644 index 000000000..294b40dee --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vloxseg5ei16.c @@ -0,0 +1,34 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5(const
__bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf4x5(rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf2x5(rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_v_bf16m1x5(rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf4x5_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf2x5_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16m1x5_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vloxseg6ei16.c b/auto-generated/bfloat16/api-testing/vloxseg6ei16.c new file mode 100644 index 000000000..17c579abf --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vloxseg6ei16.c @@ -0,0 +1,34 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf4x6(rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf2x6(rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_v_bf16m1x6(rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf4x6_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf2x6_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16m1x6_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vloxseg7ei16.c b/auto-generated/bfloat16/api-testing/vloxseg7ei16.c new file mode 100644 index 000000000..f0e04f0f8 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vloxseg7ei16.c @@ -0,0 +1,34 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf4x7(rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf2x7(rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_v_bf16m1x7(rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf4x7_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf2x7_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return
__riscv_vloxseg7ei16_v_bf16m1x7_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vloxseg8ei16.c b/auto-generated/bfloat16/api-testing/vloxseg8ei16.c new file mode 100644 index 000000000..19a53eadd --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vloxseg8ei16.c @@ -0,0 +1,34 @@ +#include +#include + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf4x8(rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf2x8(rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_v_bf16m1x8(rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf4x8_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf2x8_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16m1x8_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlse16.c b/auto-generated/bfloat16/api-testing/vlse16.c new file mode 100644 index 000000000..6ec4a53a8 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlse16.c @@ -0,0 +1,62 @@ +#include +#include + +vbfloat16mf4_t test_vlse16_v_bf16mf4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf4(rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf2(rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m1(rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m2(rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m4(rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m8(rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16mf4_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16mf2_m(vm, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16m1_m(vm, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16m2_m(vm, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16m4_m(vm, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16m8_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg2e16.c b/auto-generated/bfloat16/api-testing/vlseg2e16.c new file mode 100644 index 
000000000..1db59e83f --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg2e16.c @@ -0,0 +1,47 @@ +#include +#include + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf4x2(rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf2x2(rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m1x2(rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m2x2(rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m4x2(rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16_v_bf16mf4x2_m(vm, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16_v_bf16mf2x2_m(vm, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16_v_bf16m1x2_m(vm, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16_v_bf16m2x2_m(vm, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16_v_bf16m4x2_m(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg2e16ff.c b/auto-generated/bfloat16/api-testing/vlseg2e16ff.c new file mode 100644 index 000000000..cd0e7e381 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg2e16ff.c @@ -0,0 +1,52 @@ +#include +#include + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf4x2(rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf2x2(rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m1x2(rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m2x2(rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m4x2(rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf4x2_m(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf2x2_m(vm, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m1x2_m(vm, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m2x2_m(vm, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m4x2_m(vm, rs1, new_vl, vl); +} diff --git 
a/auto-generated/bfloat16/api-testing/vlseg3e16.c b/auto-generated/bfloat16/api-testing/vlseg3e16.c new file mode 100644 index 000000000..52e98dcc0 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg3e16.c @@ -0,0 +1,38 @@ +#include +#include + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf4x3(rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf2x3(rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m1x3(rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m2x3(rs1, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg3e16_v_bf16mf4x3_m(vm, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg3e16_v_bf16mf2x3_m(vm, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg3e16_v_bf16m1x3_m(vm, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg3e16_v_bf16m2x3_m(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg3e16ff.c b/auto-generated/bfloat16/api-testing/vlseg3e16ff.c new file mode 100644 index 000000000..623bb8f18 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg3e16ff.c @@ -0,0 +1,42 @@ +#include +#include + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf4x3(rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf2x3(rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m1x3(rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m2x3(rs1, new_vl, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf4x3_m(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf2x3_m(vm, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m1x3_m(vm, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m2x3_m(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg4e16.c b/auto-generated/bfloat16/api-testing/vlseg4e16.c new file mode 100644 index 000000000..b0d4a9411 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg4e16.c @@ -0,0 +1,38 @@ +#include +#include + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf4x4(rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf2x4(rs1, vl); +} + 
+vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m1x4(rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m2x4(rs1, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg4e16_v_bf16mf4x4_m(vm, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg4e16_v_bf16mf2x4_m(vm, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg4e16_v_bf16m1x4_m(vm, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg4e16_v_bf16m2x4_m(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg4e16ff.c b/auto-generated/bfloat16/api-testing/vlseg4e16ff.c new file mode 100644 index 000000000..7e76bc96a --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg4e16ff.c @@ -0,0 +1,42 @@ +#include +#include + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf4x4(rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf2x4(rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m1x4(rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m2x4(rs1, new_vl, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf4x4_m(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf2x4_m(vm, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m1x4_m(vm, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m2x4_m(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg5e16.c b/auto-generated/bfloat16/api-testing/vlseg5e16.c new file mode 100644 index 000000000..a36ca8401 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg5e16.c @@ -0,0 +1,29 @@ +#include +#include + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf4x5(rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf2x5(rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16m1x5(rs1, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg5e16_v_bf16mf4x5_m(vm, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg5e16_v_bf16mf2x5_m(vm, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + size_t 
vl) { + return __riscv_vlseg5e16_v_bf16m1x5_m(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg5e16ff.c b/auto-generated/bfloat16/api-testing/vlseg5e16ff.c new file mode 100644 index 000000000..ae2f49900 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg5e16ff.c @@ -0,0 +1,32 @@ +#include +#include + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf4x5(rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf2x5(rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg5e16ff_v_bf16m1x5(rs1, new_vl, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf4x5_m(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf2x5_m(vm, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16m1x5_m(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg6e16.c b/auto-generated/bfloat16/api-testing/vlseg6e16.c new file mode 100644 index 000000000..fc96aabaf --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg6e16.c @@ -0,0 +1,29 @@ +#include +#include + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf4x6(rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf2x6(rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16m1x6(rs1, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg6e16_v_bf16mf4x6_m(vm, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg6e16_v_bf16mf2x6_m(vm, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg6e16_v_bf16m1x6_m(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg6e16ff.c b/auto-generated/bfloat16/api-testing/vlseg6e16ff.c new file mode 100644 index 000000000..600f39ed0 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg6e16ff.c @@ -0,0 +1,32 @@ +#include +#include + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf4x6(rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf2x6(rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg6e16ff_v_bf16m1x6(rs1, new_vl, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf4x6_m(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return 
__riscv_vlseg6e16ff_v_bf16mf2x6_m(vm, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16m1x6_m(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg7e16.c b/auto-generated/bfloat16/api-testing/vlseg7e16.c new file mode 100644 index 000000000..530d67b29 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg7e16.c @@ -0,0 +1,29 @@ +#include +#include + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf4x7(rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf2x7(rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16m1x7(rs1, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg7e16_v_bf16mf4x7_m(vm, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg7e16_v_bf16mf2x7_m(vm, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg7e16_v_bf16m1x7_m(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg7e16ff.c b/auto-generated/bfloat16/api-testing/vlseg7e16ff.c new file mode 100644 index 000000000..918c59ae5 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg7e16ff.c @@ -0,0 +1,32 @@ +#include +#include + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf4x7(rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf2x7(rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg7e16ff_v_bf16m1x7(rs1, new_vl, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf4x7_m(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf2x7_m(vm, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16m1x7_m(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg8e16.c b/auto-generated/bfloat16/api-testing/vlseg8e16.c new file mode 100644 index 000000000..4a3576db4 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg8e16.c @@ -0,0 +1,29 @@ +#include +#include + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf4x8(rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf2x8(rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8(const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16m1x8(rs1, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg8e16_v_bf16mf4x8_m(vm, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) 
{ + return __riscv_vlseg8e16_v_bf16mf2x8_m(vm, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg8e16_v_bf16m1x8_m(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlseg8e16ff.c b/auto-generated/bfloat16/api-testing/vlseg8e16ff.c new file mode 100644 index 000000000..16d539e22 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlseg8e16ff.c @@ -0,0 +1,32 @@ +#include +#include + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf4x8(rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf2x8(rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8(const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vlseg8e16ff_v_bf16m1x8(rs1, new_vl, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf4x8_m(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf2x8_m(vm, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16m1x8_m(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlsseg2e16.c b/auto-generated/bfloat16/api-testing/vlsseg2e16.c new file mode 100644 index 000000000..444299755 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlsseg2e16.c @@ -0,0 +1,52 @@ +#include +#include + +vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf4x2(rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf2x2(rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m1x2(rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m2x2(rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m4x2(rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf4x2_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf2x2_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16m1x2_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16m2x2_m(vm, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16m4x2_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlsseg3e16.c b/auto-generated/bfloat16/api-testing/vlsseg3e16.c new file mode 100644 index 
000000000..02b38c6ea --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlsseg3e16.c @@ -0,0 +1,42 @@ +#include +#include + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf4x3(rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf2x3(rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m1x3(rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m2x3(rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf4x3_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf2x3_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16m1x3_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16m2x3_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlsseg4e16.c b/auto-generated/bfloat16/api-testing/vlsseg4e16.c new file mode 100644 index 000000000..629326dc1 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlsseg4e16.c @@ -0,0 +1,42 @@ +#include +#include + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf4x4(rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf2x4(rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m1x4(rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m2x4(rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf4x4_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf2x4_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16m1x4_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16m2x4_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlsseg5e16.c b/auto-generated/bfloat16/api-testing/vlsseg5e16.c new file mode 100644 index 000000000..82f62d786 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlsseg5e16.c @@ -0,0 +1,32 @@ +#include +#include + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf4x5(rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return 
__riscv_vlsseg5e16_v_bf16mf2x5(rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_v_bf16m1x5(rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf4x5_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf2x5_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16m1x5_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlsseg6e16.c b/auto-generated/bfloat16/api-testing/vlsseg6e16.c new file mode 100644 index 000000000..aa9e7083a --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlsseg6e16.c @@ -0,0 +1,32 @@ +#include +#include + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf4x6(rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf2x6(rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_v_bf16m1x6(rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf4x6_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf2x6_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16m1x6_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlsseg7e16.c b/auto-generated/bfloat16/api-testing/vlsseg7e16.c new file mode 100644 index 000000000..01b6fd2d8 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlsseg7e16.c @@ -0,0 +1,32 @@ +#include +#include + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf4x7(rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf2x7(rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_v_bf16m1x7(rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf4x7_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf2x7_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16m1x7_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vlsseg8e16.c b/auto-generated/bfloat16/api-testing/vlsseg8e16.c new file mode 100644 index 000000000..65b6e157e --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vlsseg8e16.c @@ -0,0 +1,32 @@ +#include +#include + +vbfloat16mf4x8_t 
test_vlsseg8e16_v_bf16mf4x8(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf4x8(rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf2x8(rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_v_bf16m1x8(rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf4x8_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf2x8_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16m1x8_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vluxei16.c b/auto-generated/bfloat16/api-testing/vluxei16.c new file mode 100644 index 000000000..47f978c37 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vluxei16.c @@ -0,0 +1,62 @@ +#include +#include + +vbfloat16mf4_t test_vluxei16_v_bf16mf4(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf4(rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf2(rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m1(rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m2(rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m4(rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8(const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m8(rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16mf4_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16mf2_m(vm, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m1_m(vm, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m2_m(vm, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m4_m(vm, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m8_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vluxseg2ei16.c b/auto-generated/bfloat16/api-testing/vluxseg2ei16.c new file mode 100644 index 000000000..67ab0184d --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vluxseg2ei16.c @@ -0,0 +1,54 @@ +#include +#include + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf4x2(rs1, rs2, vl); +} + 
+vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf2x2(rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m1x2(rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m2x2(rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m4x2(rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf4x2_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf2x2_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m1x2_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m2x2_m(vm, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m4x2_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vluxseg3ei16.c b/auto-generated/bfloat16/api-testing/vluxseg3ei16.c new file mode 100644 index 000000000..3f43a614d --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vluxseg3ei16.c @@ -0,0 +1,44 @@ +#include +#include + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf4x3(rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf2x3(rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m1x3(rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m2x3(rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf4x3_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf2x3_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m1x3_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m2x3_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vluxseg4ei16.c b/auto-generated/bfloat16/api-testing/vluxseg4ei16.c new file mode 100644 index 000000000..942ccef90 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vluxseg4ei16.c @@ -0,0 +1,44 @@ +#include +#include + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, + vuint16mf4_t rs2, 
size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf4x4(rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf2x4(rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m1x4(rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m2x4(rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf4x4_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf2x4_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m1x4_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m2x4_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vluxseg5ei16.c b/auto-generated/bfloat16/api-testing/vluxseg5ei16.c new file mode 100644 index 000000000..81f396ba6 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vluxseg5ei16.c @@ -0,0 +1,34 @@ +#include +#include + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf4x5(rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf2x5(rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_v_bf16m1x5(rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf4x5_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf2x5_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16m1x5_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vluxseg6ei16.c b/auto-generated/bfloat16/api-testing/vluxseg6ei16.c new file mode 100644 index 000000000..6f0aaa56b --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vluxseg6ei16.c @@ -0,0 +1,34 @@ +#include +#include + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf4x6(rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf2x6(rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_v_bf16m1x6(rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf4x6_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x6_t 
test_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf2x6_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16m1x6_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vluxseg7ei16.c b/auto-generated/bfloat16/api-testing/vluxseg7ei16.c new file mode 100644 index 000000000..dd1c46108 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vluxseg7ei16.c @@ -0,0 +1,34 @@ +#include +#include + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf4x7(rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf2x7(rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_v_bf16m1x7(rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf4x7_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf2x7_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16m1x7_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vluxseg8ei16.c b/auto-generated/bfloat16/api-testing/vluxseg8ei16.c new file mode 100644 index 000000000..ea3d2be1e --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vluxseg8ei16.c @@ -0,0 +1,34 @@ +#include +#include + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf4x8(rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf2x8(rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_v_bf16m1x8(rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf4x8_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf2x8_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16m1x8_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vmerge.c b/auto-generated/bfloat16/api-testing/vmerge.c new file mode 100644 index 000000000..871f44021 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vmerge.c @@ -0,0 +1,32 @@ +#include +#include + +vbfloat16mf4_t test_vmerge_vvm_bf16mf4(vbfloat16mf4_t vs2, vbfloat16mf4_t vs1, + vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_bf16mf4(vs2, vs1, v0, vl); +} + +vbfloat16mf2_t test_vmerge_vvm_bf16mf2(vbfloat16mf2_t vs2, vbfloat16mf2_t vs1, + vbool32_t v0, size_t vl) { + return __riscv_vmerge_vvm_bf16mf2(vs2, vs1, v0, vl); +} + 
+vbfloat16m1_t test_vmerge_vvm_bf16m1(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + vbool16_t v0, size_t vl) { + return __riscv_vmerge_vvm_bf16m1(vs2, vs1, v0, vl); +} + +vbfloat16m2_t test_vmerge_vvm_bf16m2(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_bf16m2(vs2, vs1, v0, vl); +} + +vbfloat16m4_t test_vmerge_vvm_bf16m4(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + vbool4_t v0, size_t vl) { + return __riscv_vmerge_vvm_bf16m4(vs2, vs1, v0, vl); +} + +vbfloat16m8_t test_vmerge_vvm_bf16m8(vbfloat16m8_t vs2, vbfloat16m8_t vs1, + vbool2_t v0, size_t vl) { + return __riscv_vmerge_vvm_bf16m8(vs2, vs1, v0, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vmv.c b/auto-generated/bfloat16/api-testing/vmv.c new file mode 100644 index 000000000..a9f0cfb12 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vmv.c @@ -0,0 +1,26 @@ +#include +#include + +vbfloat16mf4_t test_vmv_v_v_bf16mf4(vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vmv_v_v_bf16mf4(vs1, vl); +} + +vbfloat16mf2_t test_vmv_v_v_bf16mf2(vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_bf16mf2(vs1, vl); +} + +vbfloat16m1_t test_vmv_v_v_bf16m1(vbfloat16m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_bf16m1(vs1, vl); +} + +vbfloat16m2_t test_vmv_v_v_bf16m2(vbfloat16m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_bf16m2(vs1, vl); +} + +vbfloat16m4_t test_vmv_v_v_bf16m4(vbfloat16m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_bf16m4(vs1, vl); +} + +vbfloat16m8_t test_vmv_v_v_bf16m8(vbfloat16m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_bf16m8(vs1, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vreinterpret.c b/auto-generated/bfloat16/api-testing/vreinterpret.c new file mode 100644 index 000000000..44975a392 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vreinterpret.c @@ -0,0 +1,98 @@ +#include +#include + +vbfloat16mf4_t test_vreinterpret_v_i16mf4_bf16mf4(vint16mf4_t src) { + return __riscv_vreinterpret_v_i16mf4_bf16mf4(src); +} + +vbfloat16mf2_t test_vreinterpret_v_i16mf2_bf16mf2(vint16mf2_t src) { + return __riscv_vreinterpret_v_i16mf2_bf16mf2(src); +} + +vbfloat16m1_t test_vreinterpret_v_i16m1_bf16m1(vint16m1_t src) { + return __riscv_vreinterpret_v_i16m1_bf16m1(src); +} + +vbfloat16m2_t test_vreinterpret_v_i16m2_bf16m2(vint16m2_t src) { + return __riscv_vreinterpret_v_i16m2_bf16m2(src); +} + +vbfloat16m4_t test_vreinterpret_v_i16m4_bf16m4(vint16m4_t src) { + return __riscv_vreinterpret_v_i16m4_bf16m4(src); +} + +vbfloat16m8_t test_vreinterpret_v_i16m8_bf16m8(vint16m8_t src) { + return __riscv_vreinterpret_v_i16m8_bf16m8(src); +} + +vbfloat16mf4_t test_vreinterpret_v_u16mf4_bf16mf4(vuint16mf4_t src) { + return __riscv_vreinterpret_v_u16mf4_bf16mf4(src); +} + +vbfloat16mf2_t test_vreinterpret_v_u16mf2_bf16mf2(vuint16mf2_t src) { + return __riscv_vreinterpret_v_u16mf2_bf16mf2(src); +} + +vbfloat16m1_t test_vreinterpret_v_u16m1_bf16m1(vuint16m1_t src) { + return __riscv_vreinterpret_v_u16m1_bf16m1(src); +} + +vbfloat16m2_t test_vreinterpret_v_u16m2_bf16m2(vuint16m2_t src) { + return __riscv_vreinterpret_v_u16m2_bf16m2(src); +} + +vbfloat16m4_t test_vreinterpret_v_u16m4_bf16m4(vuint16m4_t src) { + return __riscv_vreinterpret_v_u16m4_bf16m4(src); +} + +vbfloat16m8_t test_vreinterpret_v_u16m8_bf16m8(vuint16m8_t src) { + return __riscv_vreinterpret_v_u16m8_bf16m8(src); +} + +vint16mf4_t test_vreinterpret_v_bf16mf4_i16mf4(vbfloat16mf4_t src) { + return __riscv_vreinterpret_v_bf16mf4_i16mf4(src); +} + +vint16mf2_t test_vreinterpret_v_bf16mf2_i16mf2(vbfloat16mf2_t src) 
{ + return __riscv_vreinterpret_v_bf16mf2_i16mf2(src); +} + +vint16m1_t test_vreinterpret_v_bf16m1_i16m1(vbfloat16m1_t src) { + return __riscv_vreinterpret_v_bf16m1_i16m1(src); +} + +vint16m2_t test_vreinterpret_v_bf16m2_i16m2(vbfloat16m2_t src) { + return __riscv_vreinterpret_v_bf16m2_i16m2(src); +} + +vint16m4_t test_vreinterpret_v_bf16m4_i16m4(vbfloat16m4_t src) { + return __riscv_vreinterpret_v_bf16m4_i16m4(src); +} + +vint16m8_t test_vreinterpret_v_bf16m8_i16m8(vbfloat16m8_t src) { + return __riscv_vreinterpret_v_bf16m8_i16m8(src); +} + +vuint16mf4_t test_vreinterpret_v_bf16mf4_u16mf4(vbfloat16mf4_t src) { + return __riscv_vreinterpret_v_bf16mf4_u16mf4(src); +} + +vuint16mf2_t test_vreinterpret_v_bf16mf2_u16mf2(vbfloat16mf2_t src) { + return __riscv_vreinterpret_v_bf16mf2_u16mf2(src); +} + +vuint16m1_t test_vreinterpret_v_bf16m1_u16m1(vbfloat16m1_t src) { + return __riscv_vreinterpret_v_bf16m1_u16m1(src); +} + +vuint16m2_t test_vreinterpret_v_bf16m2_u16m2(vbfloat16m2_t src) { + return __riscv_vreinterpret_v_bf16m2_u16m2(src); +} + +vuint16m4_t test_vreinterpret_v_bf16m4_u16m4(vbfloat16m4_t src) { + return __riscv_vreinterpret_v_bf16m4_u16m4(src); +} + +vuint16m8_t test_vreinterpret_v_bf16m8_u16m8(vbfloat16m8_t src) { + return __riscv_vreinterpret_v_bf16m8_u16m8(src); +} diff --git a/auto-generated/bfloat16/api-testing/vse16.c b/auto-generated/bfloat16/api-testing/vse16.c new file mode 100644 index 000000000..fa8c4d20f --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vse16.c @@ -0,0 +1,56 @@ +#include +#include + +void test_vse16_v_bf16mf4(__bf16 *rs1, vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vse16_v_bf16mf4(rs1, vs3, vl); +} + +void test_vse16_v_bf16mf2(__bf16 *rs1, vbfloat16mf2_t vs3, size_t vl) { + return __riscv_vse16_v_bf16mf2(rs1, vs3, vl); +} + +void test_vse16_v_bf16m1(__bf16 *rs1, vbfloat16m1_t vs3, size_t vl) { + return __riscv_vse16_v_bf16m1(rs1, vs3, vl); +} + +void test_vse16_v_bf16m2(__bf16 *rs1, vbfloat16m2_t vs3, size_t vl) { + return __riscv_vse16_v_bf16m2(rs1, vs3, vl); +} + +void test_vse16_v_bf16m4(__bf16 *rs1, vbfloat16m4_t vs3, size_t vl) { + return __riscv_vse16_v_bf16m4(rs1, vs3, vl); +} + +void test_vse16_v_bf16m8(__bf16 *rs1, vbfloat16m8_t vs3, size_t vl) { + return __riscv_vse16_v_bf16m8(rs1, vs3, vl); +} + +void test_vse16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vse16_v_bf16mf4_m(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vse16_v_bf16mf2_m(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vse16_v_bf16m1_m(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vse16_v_bf16m2_m(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vse16_v_bf16m4_m(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vse16_v_bf16m8_m(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/api-testing/vset.c b/auto-generated/bfloat16/api-testing/vset.c new file mode 100644 index 000000000..df82323a4 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vset.c @@ -0,0 +1,171 @@ +#include +#include + +vbfloat16m2_t test_vset_v_bf16m1_bf16m2(vbfloat16m2_t dest, size_t index, + vbfloat16m1_t value) { + return 
__riscv_vset_v_bf16m1_bf16m2(dest, 0, value); +} + +vbfloat16m4_t test_vset_v_bf16m1_bf16m4(vbfloat16m4_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m4(dest, 0, value); +} + +vbfloat16m4_t test_vset_v_bf16m2_bf16m4(vbfloat16m4_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset_v_bf16m2_bf16m4(dest, 0, value); +} + +vbfloat16m8_t test_vset_v_bf16m1_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m8(dest, 0, value); +} + +vbfloat16m8_t test_vset_v_bf16m2_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset_v_bf16m2_bf16m8(dest, 0, value); +} + +vbfloat16m8_t test_vset_v_bf16m4_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m4_t value) { + return __riscv_vset_v_bf16m4_bf16m8(dest, 0, value); +} + +vbfloat16mf4x2_t test_vset_v_bf16mf4_bf16mf4x2(vbfloat16mf4x2_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x2(dest, 0, value); +} + +vbfloat16mf4x3_t test_vset_v_bf16mf4_bf16mf4x3(vbfloat16mf4x3_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x3(dest, 0, value); +} + +vbfloat16mf4x4_t test_vset_v_bf16mf4_bf16mf4x4(vbfloat16mf4x4_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x4(dest, 0, value); +} + +vbfloat16mf4x5_t test_vset_v_bf16mf4_bf16mf4x5(vbfloat16mf4x5_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x5(dest, 0, value); +} + +vbfloat16mf4x6_t test_vset_v_bf16mf4_bf16mf4x6(vbfloat16mf4x6_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x6(dest, 0, value); +} + +vbfloat16mf4x7_t test_vset_v_bf16mf4_bf16mf4x7(vbfloat16mf4x7_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x7(dest, 0, value); +} + +vbfloat16mf4x8_t test_vset_v_bf16mf4_bf16mf4x8(vbfloat16mf4x8_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x8(dest, 0, value); +} + +vbfloat16mf2x2_t test_vset_v_bf16mf2_bf16mf2x2(vbfloat16mf2x2_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x2(dest, 0, value); +} + +vbfloat16mf2x3_t test_vset_v_bf16mf2_bf16mf2x3(vbfloat16mf2x3_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x3(dest, 0, value); +} + +vbfloat16mf2x4_t test_vset_v_bf16mf2_bf16mf2x4(vbfloat16mf2x4_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x4(dest, 0, value); +} + +vbfloat16mf2x5_t test_vset_v_bf16mf2_bf16mf2x5(vbfloat16mf2x5_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x5(dest, 0, value); +} + +vbfloat16mf2x6_t test_vset_v_bf16mf2_bf16mf2x6(vbfloat16mf2x6_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x6(dest, 0, value); +} + +vbfloat16mf2x7_t test_vset_v_bf16mf2_bf16mf2x7(vbfloat16mf2x7_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x7(dest, 0, value); +} + +vbfloat16mf2x8_t test_vset_v_bf16mf2_bf16mf2x8(vbfloat16mf2x8_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x8(dest, 0, value); +} + +vbfloat16m1x2_t test_vset_v_bf16m1_bf16m1x2(vbfloat16m1x2_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x2(dest, 0, value); +} + +vbfloat16m1x3_t 
test_vset_v_bf16m1_bf16m1x3(vbfloat16m1x3_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x3(dest, 0, value); +} + +vbfloat16m1x4_t test_vset_v_bf16m1_bf16m1x4(vbfloat16m1x4_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x4(dest, 0, value); +} + +vbfloat16m1x5_t test_vset_v_bf16m1_bf16m1x5(vbfloat16m1x5_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x5(dest, 0, value); +} + +vbfloat16m1x6_t test_vset_v_bf16m1_bf16m1x6(vbfloat16m1x6_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x6(dest, 0, value); +} + +vbfloat16m1x7_t test_vset_v_bf16m1_bf16m1x7(vbfloat16m1x7_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x7(dest, 0, value); +} + +vbfloat16m1x8_t test_vset_v_bf16m1_bf16m1x8(vbfloat16m1x8_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x8(dest, 0, value); +} + +vbfloat16m2x2_t test_vset_v_bf16m2_bf16m2x2(vbfloat16m2x2_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset_v_bf16m2_bf16m2x2(dest, 0, value); +} + +vbfloat16m2x3_t test_vset_v_bf16m2_bf16m2x3(vbfloat16m2x3_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset_v_bf16m2_bf16m2x3(dest, 0, value); +} + +vbfloat16m2x4_t test_vset_v_bf16m2_bf16m2x4(vbfloat16m2x4_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset_v_bf16m2_bf16m2x4(dest, 0, value); +} + +vbfloat16m4x2_t test_vset_v_bf16m4_bf16m4x2(vbfloat16m4x2_t dest, size_t index, + vbfloat16m4_t value) { + return __riscv_vset_v_bf16m4_bf16m4x2(dest, 0, value); +} diff --git a/auto-generated/bfloat16/api-testing/vsoxei16.c b/auto-generated/bfloat16/api-testing/vsoxei16.c new file mode 100644 index 000000000..730d0d479 --- /dev/null +++ b/auto-generated/bfloat16/api-testing/vsoxei16.c @@ -0,0 +1,62 @@ +#include +#include + +void test_vsoxei16_v_bf16mf4(__bf16 *rs1, vuint16mf4_t rs2, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vsoxei16_v_bf16mf4(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16mf2(__bf16 *rs1, vuint16mf2_t rs2, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vsoxei16_v_bf16mf2(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m1(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vsoxei16_v_bf16m1(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m2(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vsoxei16_v_bf16m2(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m4(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vsoxei16_v_bf16m4(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m8(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vsoxei16_v_bf16m8(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_bf16mf4_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_bf16mf2_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_bf16m1_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_bf16m2_m(vm, rs1, rs2, vs3, vl); +} 
+
+void test_vsoxei16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2,
+ vbfloat16m4_t vs3, size_t vl) {
+ return __riscv_vsoxei16_v_bf16m4_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsoxei16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2,
+ vbfloat16m8_t vs3, size_t vl) {
+ return __riscv_vsoxei16_v_bf16m8_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsoxseg2ei16.c b/auto-generated/bfloat16/api-testing/vsoxseg2ei16.c
new file mode 100644
index 000000000..4a8bf8606
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsoxseg2ei16.c
@@ -0,0 +1,54 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_bf16mf4x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_bf16mf2x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_bf16m1x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_bf16m2x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_bf16m4x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg2ei16_v_bf16mf4x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x2_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg2ei16_v_bf16mf2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_bf16m1x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_bf16m2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsoxseg2ei16_v_bf16m4x2_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsoxseg3ei16.c b/auto-generated/bfloat16/api-testing/vsoxseg3ei16.c
new file mode 100644
index 000000000..8eef4e9a2
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsoxseg3ei16.c
@@ -0,0 +1,44 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei16_v_bf16mf4x3(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei16_v_bf16mf2x3(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei16_v_bf16m1x3(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei16_v_bf16m2x3(rs1, vs2, vs3, vl);
+}
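+
+// Editorial note (not generator output): the vsoxseg<N>ei16 intrinsics store
+// N-field segments; for each index i, one element from every tuple field is
+// written contiguously starting at rs1 + vs2[i], i.e. one array-of-structures
+// element per index.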
+
+void test_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x3_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg3ei16_v_bf16mf4x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x3_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg3ei16_v_bf16mf2x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei16_v_bf16m1x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsoxseg3ei16_v_bf16m2x3_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsoxseg4ei16.c b/auto-generated/bfloat16/api-testing/vsoxseg4ei16.c
new file mode 100644
index 000000000..f06ecf271
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsoxseg4ei16.c
@@ -0,0 +1,44 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16_v_bf16mf4x4(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16_v_bf16mf2x4(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16_v_bf16m1x4(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16_v_bf16m2x4(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg4ei16_v_bf16mf4x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x4_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg4ei16_v_bf16mf2x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16_v_bf16m1x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsoxseg4ei16_v_bf16m2x4_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsoxseg5ei16.c b/auto-generated/bfloat16/api-testing/vsoxseg5ei16.c
new file mode 100644
index 000000000..6f1d6f6ec
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsoxseg5ei16.c
@@ -0,0 +1,34 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei16_v_bf16mf4x5(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei16_v_bf16mf2x5(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei16_v_bf16m1x5(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x5_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg5ei16_v_bf16mf4x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x5_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg5ei16_v_bf16mf2x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsoxseg5ei16_v_bf16m1x5_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsoxseg6ei16.c b/auto-generated/bfloat16/api-testing/vsoxseg6ei16.c
new file mode 100644
index 000000000..50fca1660
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsoxseg6ei16.c
@@ -0,0 +1,34 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei16_v_bf16mf4x6(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei16_v_bf16mf2x6(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei16_v_bf16m1x6(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x6_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg6ei16_v_bf16mf4x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x6_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg6ei16_v_bf16mf2x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsoxseg6ei16_v_bf16m1x6_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsoxseg7ei16.c b/auto-generated/bfloat16/api-testing/vsoxseg7ei16.c
new file mode 100644
index 000000000..cff1eb034
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsoxseg7ei16.c
@@ -0,0 +1,34 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei16_v_bf16mf4x7(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei16_v_bf16mf2x7(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei16_v_bf16m1x7(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x7_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg7ei16_v_bf16mf4x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x7_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg7ei16_v_bf16mf2x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsoxseg7ei16_v_bf16m1x7_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsoxseg8ei16.c b/auto-generated/bfloat16/api-testing/vsoxseg8ei16.c
new file mode 100644
index 000000000..3dd02854a
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsoxseg8ei16.c
@@ -0,0 +1,34 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_bf16mf4x8(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_bf16mf2x8(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_bf16m1x8(rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x8_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg8ei16_v_bf16mf4x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x8_t vs3,
+ size_t vl) {
+ return __riscv_vsoxseg8ei16_v_bf16mf2x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsoxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsoxseg8ei16_v_bf16m1x8_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsse16.c b/auto-generated/bfloat16/api-testing/vsse16.c
new file mode 100644
index 000000000..0ad6a14bf
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsse16.c
@@ -0,0 +1,62 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsse16_v_bf16mf4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsse16_v_bf16mf4(rs1, rs2, vs3, vl);
+}
+
+void test_vsse16_v_bf16mf2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsse16_v_bf16mf2(rs1, rs2, vs3, vl);
+}
+
+void test_vsse16_v_bf16m1(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsse16_v_bf16m1(rs1, rs2, vs3, vl);
+}
+
+void test_vsse16_v_bf16m2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsse16_v_bf16m2(rs1, rs2, vs3, vl);
+}
+
+void test_vsse16_v_bf16m4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4_t vs3,
+ size_t vl) {
+ return __riscv_vsse16_v_bf16m4(rs1, rs2, vs3, vl);
+}
+
+void test_vsse16_v_bf16m8(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m8_t vs3,
+ size_t vl) {
+ return __riscv_vsse16_v_bf16m8(rs1, rs2, vs3, vl);
+}
+
+void test_vsse16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsse16_v_bf16mf4_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsse16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsse16_v_bf16mf2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsse16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsse16_v_bf16m1_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsse16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsse16_v_bf16m2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsse16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m4_t vs3, size_t vl) {
+ return __riscv_vsse16_v_bf16m4_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsse16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m8_t vs3, size_t vl) {
+ return __riscv_vsse16_v_bf16m8_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsseg2e16.c b/auto-generated/bfloat16/api-testing/vsseg2e16.c
new file mode 100644
index 000000000..868ca48a1
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsseg2e16.c
@@ -0,0 +1,47 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsseg2e16_v_bf16mf4x2(__bf16 *rs1, vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsseg2e16_v_bf16mf4x2(rs1, vs3, vl);
+}
+
+void test_vsseg2e16_v_bf16mf2x2(__bf16 *rs1, vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsseg2e16_v_bf16mf2x2(rs1, vs3, vl);
+}
+
+void test_vsseg2e16_v_bf16m1x2(__bf16 *rs1, vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsseg2e16_v_bf16m1x2(rs1, vs3, vl);
+}
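+
+// Editorial sketch (hand-written, not generator output): vsseg2e16
+// interleaves the two fields of a tuple into array-of-structures layout,
+// storing a[0], b[0], a[1], b[1], ... contiguously at dst. Names below are
+// illustrative only.
+static inline void example_interleave_bf16(__bf16 *dst, vbfloat16m1x2_t ab,
+ size_t vl) {
+ __riscv_vsseg2e16_v_bf16m1x2(dst, ab, vl);
+}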
+
+void test_vsseg2e16_v_bf16m2x2(__bf16 *rs1, vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsseg2e16_v_bf16m2x2(rs1, vs3, vl);
+}
+
+void test_vsseg2e16_v_bf16m4x2(__bf16 *rs1, vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsseg2e16_v_bf16m4x2(rs1, vs3, vl);
+}
+
+void test_vsseg2e16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsseg2e16_v_bf16mf4x2_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg2e16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsseg2e16_v_bf16mf2x2_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg2e16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x2_t vs3,
+ size_t vl) {
+ return __riscv_vsseg2e16_v_bf16m1x2_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg2e16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x2_t vs3,
+ size_t vl) {
+ return __riscv_vsseg2e16_v_bf16m2x2_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg2e16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vbfloat16m4x2_t vs3,
+ size_t vl) {
+ return __riscv_vsseg2e16_v_bf16m4x2_m(vm, rs1, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsseg3e16.c b/auto-generated/bfloat16/api-testing/vsseg3e16.c
new file mode 100644
index 000000000..2859813e2
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsseg3e16.c
@@ -0,0 +1,38 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsseg3e16_v_bf16mf4x3(__bf16 *rs1, vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsseg3e16_v_bf16mf4x3(rs1, vs3, vl);
+}
+
+void test_vsseg3e16_v_bf16mf2x3(__bf16 *rs1, vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsseg3e16_v_bf16mf2x3(rs1, vs3, vl);
+}
+
+void test_vsseg3e16_v_bf16m1x3(__bf16 *rs1, vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsseg3e16_v_bf16m1x3(rs1, vs3, vl);
+}
+
+void test_vsseg3e16_v_bf16m2x3(__bf16 *rs1, vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsseg3e16_v_bf16m2x3(rs1, vs3, vl);
+}
+
+void test_vsseg3e16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsseg3e16_v_bf16mf4x3_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg3e16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsseg3e16_v_bf16mf2x3_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg3e16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x3_t vs3,
+ size_t vl) {
+ return __riscv_vsseg3e16_v_bf16m1x3_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg3e16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x3_t vs3,
+ size_t vl) {
+ return __riscv_vsseg3e16_v_bf16m2x3_m(vm, rs1, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsseg4e16.c b/auto-generated/bfloat16/api-testing/vsseg4e16.c
new file mode 100644
index 000000000..41132b932
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsseg4e16.c
@@ -0,0 +1,38 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsseg4e16_v_bf16mf4x4(__bf16 *rs1, vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsseg4e16_v_bf16mf4x4(rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16mf2x4(__bf16 *rs1, vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsseg4e16_v_bf16mf2x4(rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16m1x4(__bf16 *rs1, vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsseg4e16_v_bf16m1x4(rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16m2x4(__bf16 *rs1, vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsseg4e16_v_bf16m2x4(rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsseg4e16_v_bf16mf4x4_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsseg4e16_v_bf16mf2x4_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x4_t vs3,
+ size_t vl) {
+ return __riscv_vsseg4e16_v_bf16m1x4_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x4_t vs3,
+ size_t vl) {
+ return __riscv_vsseg4e16_v_bf16m2x4_m(vm, rs1, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsseg5e16.c b/auto-generated/bfloat16/api-testing/vsseg5e16.c
new file mode 100644
index 000000000..e09575ab0
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsseg5e16.c
@@ -0,0 +1,29 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsseg5e16_v_bf16mf4x5(__bf16 *rs1, vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsseg5e16_v_bf16mf4x5(rs1, vs3, vl);
+}
+
+void test_vsseg5e16_v_bf16mf2x5(__bf16 *rs1, vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsseg5e16_v_bf16mf2x5(rs1, vs3, vl);
+}
+
+void test_vsseg5e16_v_bf16m1x5(__bf16 *rs1, vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsseg5e16_v_bf16m1x5(rs1, vs3, vl);
+}
+
+void test_vsseg5e16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsseg5e16_v_bf16mf4x5_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg5e16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsseg5e16_v_bf16mf2x5_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg5e16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x5_t vs3,
+ size_t vl) {
+ return __riscv_vsseg5e16_v_bf16m1x5_m(vm, rs1, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsseg6e16.c b/auto-generated/bfloat16/api-testing/vsseg6e16.c
new file mode 100644
index 000000000..5da413ae0
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsseg6e16.c
@@ -0,0 +1,29 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsseg6e16_v_bf16mf4x6(__bf16 *rs1, vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsseg6e16_v_bf16mf4x6(rs1, vs3, vl);
+}
+
+void test_vsseg6e16_v_bf16mf2x6(__bf16 *rs1, vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsseg6e16_v_bf16mf2x6(rs1, vs3, vl);
+}
+
+void test_vsseg6e16_v_bf16m1x6(__bf16 *rs1, vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsseg6e16_v_bf16m1x6(rs1, vs3, vl);
+}
+
+void test_vsseg6e16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsseg6e16_v_bf16mf4x6_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg6e16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsseg6e16_v_bf16mf2x6_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg6e16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x6_t vs3,
+ size_t vl) {
+ return __riscv_vsseg6e16_v_bf16m1x6_m(vm, rs1, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsseg7e16.c b/auto-generated/bfloat16/api-testing/vsseg7e16.c
new file mode 100644
index 000000000..c0674806e
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsseg7e16.c
@@ -0,0 +1,29 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsseg7e16_v_bf16mf4x7(__bf16 *rs1, vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsseg7e16_v_bf16mf4x7(rs1, vs3, vl);
+}
+
+void test_vsseg7e16_v_bf16mf2x7(__bf16 *rs1, vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsseg7e16_v_bf16mf2x7(rs1, vs3, vl);
+}
+
+void test_vsseg7e16_v_bf16m1x7(__bf16 *rs1, vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsseg7e16_v_bf16m1x7(rs1, vs3, vl);
+}
+
+void test_vsseg7e16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsseg7e16_v_bf16mf4x7_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg7e16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsseg7e16_v_bf16mf2x7_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg7e16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x7_t vs3,
+ size_t vl) {
+ return __riscv_vsseg7e16_v_bf16m1x7_m(vm, rs1, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsseg8e16.c b/auto-generated/bfloat16/api-testing/vsseg8e16.c
new file mode 100644
index 000000000..b508667c5
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsseg8e16.c
@@ -0,0 +1,29 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsseg8e16_v_bf16mf4x8(__bf16 *rs1, vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsseg8e16_v_bf16mf4x8(rs1, vs3, vl);
+}
+
+void test_vsseg8e16_v_bf16mf2x8(__bf16 *rs1, vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsseg8e16_v_bf16mf2x8(rs1, vs3, vl);
+}
+
+void test_vsseg8e16_v_bf16m1x8(__bf16 *rs1, vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsseg8e16_v_bf16m1x8(rs1, vs3, vl);
+}
+
+void test_vsseg8e16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsseg8e16_v_bf16mf4x8_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg8e16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsseg8e16_v_bf16mf2x8_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg8e16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x8_t vs3,
+ size_t vl) {
+ return __riscv_vsseg8e16_v_bf16m1x8_m(vm, rs1, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vssseg2e16.c b/auto-generated/bfloat16/api-testing/vssseg2e16.c
new file mode 100644
index 000000000..4befac177
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vssseg2e16.c
@@ -0,0 +1,52 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vssseg2e16_v_bf16mf4x2(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vssseg2e16_v_bf16mf4x2(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16mf2x2(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vssseg2e16_v_bf16mf2x2(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16m1x2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x2_t vs3,
+ size_t vl) {
+ return __riscv_vssseg2e16_v_bf16m1x2(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16m2x2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x2_t vs3,
+ size_t vl) {
+ return __riscv_vssseg2e16_v_bf16m2x2(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16m4x2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4x2_t vs3,
+ size_t vl) {
+ return __riscv_vssseg2e16_v_bf16m4x2(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vssseg2e16_v_bf16mf4x2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vssseg2e16_v_bf16mf2x2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vssseg2e16_v_bf16m1x2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vssseg2e16_v_bf16m2x2_m(vm, rs1, rs2, vs3, vl);
+}
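+
+// Editorial sketch (hand-written, not generator output): vssseg2e16 places
+// consecutive {x, y} segments a fixed byte stride apart; segment i is written
+// at (char *)row0 + i * row_bytes, e.g. one pair at the start of each row of
+// an image. Names below are illustrative only.
+static inline void example_strided_pairs_bf16(__bf16 *row0, ptrdiff_t row_bytes,
+ vbfloat16m1x2_t xy, size_t vl) {
+ __riscv_vssseg2e16_v_bf16m1x2(row0, row_bytes, xy, vl);
+}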
+
+void test_vssseg2e16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vssseg2e16_v_bf16m4x2_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vssseg3e16.c b/auto-generated/bfloat16/api-testing/vssseg3e16.c
new file mode 100644
index 000000000..329ef56ea
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vssseg3e16.c
@@ -0,0 +1,42 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vssseg3e16_v_bf16mf4x3(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vssseg3e16_v_bf16mf4x3(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16mf2x3(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vssseg3e16_v_bf16mf2x3(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16m1x3(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x3_t vs3,
+ size_t vl) {
+ return __riscv_vssseg3e16_v_bf16m1x3(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16m2x3(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x3_t vs3,
+ size_t vl) {
+ return __riscv_vssseg3e16_v_bf16m2x3(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vssseg3e16_v_bf16mf4x3_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vssseg3e16_v_bf16mf2x3_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vssseg3e16_v_bf16m1x3_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vssseg3e16_v_bf16m2x3_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vssseg4e16.c b/auto-generated/bfloat16/api-testing/vssseg4e16.c
new file mode 100644
index 000000000..91646e642
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vssseg4e16.c
@@ -0,0 +1,42 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vssseg4e16_v_bf16mf4x4(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vssseg4e16_v_bf16mf4x4(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16mf2x4(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vssseg4e16_v_bf16mf2x4(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16m1x4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x4_t vs3,
+ size_t vl) {
+ return __riscv_vssseg4e16_v_bf16m1x4(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16m2x4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x4_t vs3,
+ size_t vl) {
+ return __riscv_vssseg4e16_v_bf16m2x4(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vssseg4e16_v_bf16mf4x4_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vssseg4e16_v_bf16mf2x4_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vssseg4e16_v_bf16m1x4_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vssseg4e16_v_bf16m2x4_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vssseg5e16.c b/auto-generated/bfloat16/api-testing/vssseg5e16.c
new file mode 100644
index 000000000..a1e4430d3
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vssseg5e16.c
@@ -0,0 +1,32 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vssseg5e16_v_bf16mf4x5(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vssseg5e16_v_bf16mf4x5(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16mf2x5(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vssseg5e16_v_bf16mf2x5(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16m1x5(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x5_t vs3,
+ size_t vl) {
+ return __riscv_vssseg5e16_v_bf16m1x5(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vssseg5e16_v_bf16mf4x5_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vssseg5e16_v_bf16mf2x5_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vssseg5e16_v_bf16m1x5_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vssseg6e16.c b/auto-generated/bfloat16/api-testing/vssseg6e16.c
new file mode 100644
index 000000000..1f807f889
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vssseg6e16.c
@@ -0,0 +1,32 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vssseg6e16_v_bf16mf4x6(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vssseg6e16_v_bf16mf4x6(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16mf2x6(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vssseg6e16_v_bf16mf2x6(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16m1x6(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x6_t vs3,
+ size_t vl) {
+ return __riscv_vssseg6e16_v_bf16m1x6(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vssseg6e16_v_bf16mf4x6_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vssseg6e16_v_bf16mf2x6_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vssseg6e16_v_bf16m1x6_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vssseg7e16.c b/auto-generated/bfloat16/api-testing/vssseg7e16.c
new file mode 100644
index 000000000..0ac2db471
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vssseg7e16.c
@@ -0,0 +1,32 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vssseg7e16_v_bf16mf4x7(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vssseg7e16_v_bf16mf4x7(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16mf2x7(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vssseg7e16_v_bf16mf2x7(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16m1x7(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x7_t vs3,
+ size_t vl) {
+ return __riscv_vssseg7e16_v_bf16m1x7(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vssseg7e16_v_bf16mf4x7_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vssseg7e16_v_bf16mf2x7_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vssseg7e16_v_bf16m1x7_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vssseg8e16.c b/auto-generated/bfloat16/api-testing/vssseg8e16.c
new file mode 100644
index 000000000..864344540
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vssseg8e16.c
@@ -0,0 +1,32 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vssseg8e16_v_bf16mf4x8(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vssseg8e16_v_bf16mf4x8(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg8e16_v_bf16mf2x8(__bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vssseg8e16_v_bf16mf2x8(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg8e16_v_bf16m1x8(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x8_t vs3,
+ size_t vl) {
+ return __riscv_vssseg8e16_v_bf16m1x8(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg8e16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vssseg8e16_v_bf16mf4x8_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg8e16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vssseg8e16_v_bf16mf2x8_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg8e16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vssseg8e16_v_bf16m1x8_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsuxei16.c b/auto-generated/bfloat16/api-testing/vsuxei16.c
new file mode 100644
index 000000000..440ee93fe
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsuxei16.c
@@ -0,0 +1,62 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsuxei16_v_bf16mf4(__bf16 *rs1, vuint16mf4_t rs2, vbfloat16mf4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei16_v_bf16mf4(rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16mf2(__bf16 *rs1, vuint16mf2_t rs2, vbfloat16mf2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei16_v_bf16mf2(rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m1(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei16_v_bf16m1(rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m2(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei16_v_bf16m2(rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m4(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei16_v_bf16m4(rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m8(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3,
+ size_t vl) {
+ return __riscv_vsuxei16_v_bf16m8(rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2,
+ vbfloat16mf4_t vs3, size_t vl) {
+ return __riscv_vsuxei16_v_bf16mf4_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2,
+ vbfloat16mf2_t vs3, size_t vl) {
+ return __riscv_vsuxei16_v_bf16mf2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2,
+ vbfloat16m1_t vs3, size_t vl) {
+ return __riscv_vsuxei16_v_bf16m1_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2,
+ vbfloat16m2_t vs3, size_t vl) {
+ return __riscv_vsuxei16_v_bf16m2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2,
+ vbfloat16m4_t vs3, size_t vl) {
+ return __riscv_vsuxei16_v_bf16m4_m(vm, rs1, rs2, vs3, vl);
+}
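+
+// Editorial note (not generator output): vsuxei16 is the unordered
+// counterpart of vsoxei16; writes to overlapping addresses may complete in
+// any order, which can be cheaper when store ordering is irrelevant.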
+
+void test_vsuxei16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2,
+ vbfloat16m8_t vs3, size_t vl) {
+ return __riscv_vsuxei16_v_bf16m8_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsuxseg2ei16.c b/auto-generated/bfloat16/api-testing/vsuxseg2ei16.c
new file mode 100644
index 000000000..03827f92a
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsuxseg2ei16.c
@@ -0,0 +1,54 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_bf16mf4x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_bf16mf2x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_bf16m1x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_bf16m2x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_bf16m4x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg2ei16_v_bf16mf4x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x2_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg2ei16_v_bf16mf2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_bf16m1x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2,
+ vbfloat16m2x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_bf16m2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2,
+ vbfloat16m4x2_t vs3, size_t vl) {
+ return __riscv_vsuxseg2ei16_v_bf16m4x2_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsuxseg3ei16.c b/auto-generated/bfloat16/api-testing/vsuxseg3ei16.c
new file mode 100644
index 000000000..4e3698506
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsuxseg3ei16.c
@@ -0,0 +1,44 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_bf16mf4x3(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_bf16mf2x3(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_bf16m1x3(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_bf16m2x3(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x3_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg3ei16_v_bf16mf4x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x3_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg3ei16_v_bf16mf2x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_bf16m1x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2,
+ vbfloat16m2x3_t vs3, size_t vl) {
+ return __riscv_vsuxseg3ei16_v_bf16m2x3_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsuxseg4ei16.c b/auto-generated/bfloat16/api-testing/vsuxseg4ei16.c
new file mode 100644
index 000000000..fda4e5e7e
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsuxseg4ei16.c
@@ -0,0 +1,44 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei16_v_bf16mf4x4(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei16_v_bf16mf2x4(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei16_v_bf16m1x4(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei16_v_bf16m2x4(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg4ei16_v_bf16mf4x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x4_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg4ei16_v_bf16mf2x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei16_v_bf16m1x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2,
+ vbfloat16m2x4_t vs3, size_t vl) {
+ return __riscv_vsuxseg4ei16_v_bf16m2x4_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsuxseg5ei16.c b/auto-generated/bfloat16/api-testing/vsuxseg5ei16.c
new file mode 100644
index 000000000..07689a012
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsuxseg5ei16.c
@@ -0,0 +1,34 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei16_v_bf16mf4x5(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei16_v_bf16mf2x5(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei16_v_bf16m1x5(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x5_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg5ei16_v_bf16mf4x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x5_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg5ei16_v_bf16mf2x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x5_t vs3, size_t vl) {
+ return __riscv_vsuxseg5ei16_v_bf16m1x5_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsuxseg6ei16.c b/auto-generated/bfloat16/api-testing/vsuxseg6ei16.c
new file mode 100644
index 000000000..8df400e67
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsuxseg6ei16.c
@@ -0,0 +1,34 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16_v_bf16mf4x6(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16_v_bf16mf2x6(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16_v_bf16m1x6(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x6_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg6ei16_v_bf16mf4x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x6_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg6ei16_v_bf16mf2x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x6_t vs3, size_t vl) {
+ return __riscv_vsuxseg6ei16_v_bf16m1x6_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsuxseg7ei16.c b/auto-generated/bfloat16/api-testing/vsuxseg7ei16.c
new file mode 100644
index 000000000..b2408d17e
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsuxseg7ei16.c
@@ -0,0 +1,34 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei16_v_bf16mf4x7(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei16_v_bf16mf2x7(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei16_v_bf16m1x7(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x7_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg7ei16_v_bf16mf4x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x7_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg7ei16_v_bf16mf2x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x7_t vs3, size_t vl) {
+ return __riscv_vsuxseg7ei16_v_bf16m1x7_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vsuxseg8ei16.c b/auto-generated/bfloat16/api-testing/vsuxseg8ei16.c
new file mode 100644
index 000000000..195aa60b8
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vsuxseg8ei16.c
@@ -0,0 +1,34 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2,
+ vbfloat16mf4x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei16_v_bf16mf4x8(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2,
+ vbfloat16mf2x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei16_v_bf16mf2x8(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei16_v_bf16m1x8(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1,
+ vuint16mf4_t vs2, vbfloat16mf4x8_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg8ei16_v_bf16mf4x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1,
+ vuint16mf2_t vs2, vbfloat16mf2x8_t vs3,
+ size_t vl) {
+ return __riscv_vsuxseg8ei16_v_bf16mf2x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+ vbfloat16m1x8_t vs3, size_t vl) {
+ return __riscv_vsuxseg8ei16_v_bf16m1x8_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/api-testing/vundefined.c b/auto-generated/bfloat16/api-testing/vundefined.c
new file mode 100644
index 000000000..13a91ae9d
--- /dev/null
+++ b/auto-generated/bfloat16/api-testing/vundefined.c
@@ -0,0 +1,118 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+vbfloat16mf4_t test_vundefined_bf16mf4() {
+ return __riscv_vundefined_bf16mf4();
+}
+
+vbfloat16mf2_t test_vundefined_bf16mf2() {
+ return __riscv_vundefined_bf16mf2();
+}
+
+vbfloat16m1_t test_vundefined_bf16m1() { return __riscv_vundefined_bf16m1(); }
+
+vbfloat16m2_t test_vundefined_bf16m2() { return __riscv_vundefined_bf16m2(); }
+
+vbfloat16m4_t test_vundefined_bf16m4() { return __riscv_vundefined_bf16m4(); }
+
+vbfloat16m8_t test_vundefined_bf16m8() { return __riscv_vundefined_bf16m8(); }
+
+vbfloat16mf4x2_t test_vundefined_bf16mf4x2() {
+ return __riscv_vundefined_bf16mf4x2();
+}
+
+vbfloat16mf4x3_t test_vundefined_bf16mf4x3() {
+ return __riscv_vundefined_bf16mf4x3();
+}
+
+vbfloat16mf4x4_t test_vundefined_bf16mf4x4() {
+ return __riscv_vundefined_bf16mf4x4();
+}
+
+vbfloat16mf4x5_t test_vundefined_bf16mf4x5() {
+ return __riscv_vundefined_bf16mf4x5();
+}
+
+vbfloat16mf4x6_t test_vundefined_bf16mf4x6() {
+ return __riscv_vundefined_bf16mf4x6();
+}
+
+vbfloat16mf4x7_t test_vundefined_bf16mf4x7() {
+ return __riscv_vundefined_bf16mf4x7();
+}
+
+vbfloat16mf4x8_t test_vundefined_bf16mf4x8() {
+ return __riscv_vundefined_bf16mf4x8();
+}
+
+vbfloat16mf2x2_t test_vundefined_bf16mf2x2() {
+ return __riscv_vundefined_bf16mf2x2();
+}
+
+vbfloat16mf2x3_t test_vundefined_bf16mf2x3() {
+ return __riscv_vundefined_bf16mf2x3();
+}
+
+vbfloat16mf2x4_t test_vundefined_bf16mf2x4() {
+ return __riscv_vundefined_bf16mf2x4();
+}
+
+vbfloat16mf2x5_t test_vundefined_bf16mf2x5() {
+ return __riscv_vundefined_bf16mf2x5();
+}
+
+vbfloat16mf2x6_t test_vundefined_bf16mf2x6() {
+ return __riscv_vundefined_bf16mf2x6();
+}
+
+vbfloat16mf2x7_t test_vundefined_bf16mf2x7() {
+ return __riscv_vundefined_bf16mf2x7();
+}
+
+vbfloat16mf2x8_t test_vundefined_bf16mf2x8() {
+ return __riscv_vundefined_bf16mf2x8();
+}
+
+vbfloat16m1x2_t test_vundefined_bf16m1x2() {
+ return __riscv_vundefined_bf16m1x2();
+}
+
+vbfloat16m1x3_t test_vundefined_bf16m1x3() {
+ return __riscv_vundefined_bf16m1x3();
+}
+
+vbfloat16m1x4_t test_vundefined_bf16m1x4() {
+ return __riscv_vundefined_bf16m1x4();
+}
+
+vbfloat16m1x5_t test_vundefined_bf16m1x5() {
+ return __riscv_vundefined_bf16m1x5();
+}
+
+vbfloat16m1x6_t test_vundefined_bf16m1x6() {
+ return __riscv_vundefined_bf16m1x6();
+}
+
+vbfloat16m1x7_t test_vundefined_bf16m1x7() {
+ return __riscv_vundefined_bf16m1x7();
+}
+
+vbfloat16m1x8_t test_vundefined_bf16m1x8() {
+ return __riscv_vundefined_bf16m1x8();
+}
+
+vbfloat16m2x2_t test_vundefined_bf16m2x2() {
+ return __riscv_vundefined_bf16m2x2();
+}
+
+vbfloat16m2x3_t test_vundefined_bf16m2x3() {
+ return __riscv_vundefined_bf16m2x3();
+}
+
+vbfloat16m2x4_t test_vundefined_bf16m2x4() {
+ return __riscv_vundefined_bf16m2x4();
+}
+
+vbfloat16m4x2_t test_vundefined_bf16m4x2() {
+ return __riscv_vundefined_bf16m4x2();
+} diff --git a/auto-generated/bfloat16/intrinsic_funcs.adoc b/auto-generated/bfloat16/intrinsic_funcs.adoc new file mode 100644 index 000000000..d1f272329 --- /dev/null +++ b/auto-generated/bfloat16/intrinsic_funcs.adoc @@ -0,0 +1,1937 @@ + +=== BFloat16 Vector Loads and Stores Intrinsics + +[[bf16-vector-unit-stride-load]] +==== Vector Unit-Stride Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vle16_v_bf16mf4(const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_v_bf16mf2(const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_v_bf16m1(const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_v_bf16m2(const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_v_bf16m4(const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_v_bf16m8(const __bf16 *rs1, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16mf2_t __riscv_vle16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1_t __riscv_vle16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m2_t __riscv_vle16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m4_t __riscv_vle16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m8_t __riscv_vle16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + size_t vl); +---- + +[[bf16-vector-unit-stride-store]] +==== Vector Unit-Stride Store Intrinsics + +[,c] +---- +void __riscv_vse16_v_bf16mf4(__bf16 *rs1, vbfloat16mf4_t vs3, size_t vl); +void __riscv_vse16_v_bf16mf2(__bf16 *rs1, vbfloat16mf2_t vs3, size_t vl); +void __riscv_vse16_v_bf16m1(__bf16 *rs1, vbfloat16m1_t vs3, size_t vl); +void __riscv_vse16_v_bf16m2(__bf16 *rs1, vbfloat16m2_t vs3, size_t vl); +void __riscv_vse16_v_bf16m4(__bf16 *rs1, vbfloat16m4_t vs3, size_t vl); +void __riscv_vse16_v_bf16m8(__bf16 *rs1, vbfloat16m8_t vs3, size_t vl); +// masked functions +void __riscv_vse16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vbfloat16mf4_t vs3, + size_t vl); +void __riscv_vse16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vbfloat16mf2_t vs3, + size_t vl); +void __riscv_vse16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vse16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vse16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vse16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vbfloat16m8_t vs3, + size_t vl); +---- + +[[vector-strided-load]] +==== Vector Strided Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vlse16_v_bf16mf4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vlse16_v_bf16mf2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vlse16_v_bf16m1(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vlse16_v_bf16m2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vlse16_v_bf16m4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vlse16_v_bf16m8(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vlse16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1_t __riscv_vlse16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2_t __riscv_vlse16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4_t __riscv_vlse16_v_bf16m4_m(vbool4_t vm, 
const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m8_t __riscv_vlse16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +---- + +[[vector-strided-store]] +==== Vector Strided Store Intrinsics + +[,c] +---- +void __riscv_vsse16_v_bf16mf4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4_t vs3, + size_t vl); +void __riscv_vsse16_v_bf16mf2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2_t vs3, + size_t vl); +void __riscv_vsse16_v_bf16m1(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vsse16_v_bf16m2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vsse16_v_bf16m4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vsse16_v_bf16m8(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m8_t vs3, + size_t vl); +// masked functions +void __riscv_vsse16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsse16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsse16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1_t vs3, size_t vl); +void __riscv_vsse16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2_t vs3, size_t vl); +void __riscv_vsse16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m4_t vs3, size_t vl); +void __riscv_vsse16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m8_t vs3, size_t vl); +---- + +[[vector-indexed-load]] +==== Vector Indexed Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vloxei16_v_bf16mf4(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vloxei16_v_bf16mf2(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vloxei16_v_bf16m1(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_v_bf16m2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_v_bf16m4(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_v_bf16m8(const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16_v_bf16mf4(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vluxei16_v_bf16mf2(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vluxei16_v_bf16m1(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_v_bf16m2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_v_bf16m4(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_v_bf16m8(const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vloxei16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vloxei16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vloxei16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl); +vbfloat16mf4_t __riscv_vluxei16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t 
__riscv_vluxei16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vluxei16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vluxei16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vluxei16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl); +---- + +[[vector-indexed-store]] +==== Vector Indexed Store Intrinsics + +[,c] +---- +void __riscv_vsoxei16_v_bf16mf4(__bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16mf2(__bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16m1(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vsoxei16_v_bf16m2(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vsoxei16_v_bf16m4(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vsoxei16_v_bf16m8(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl); +void __riscv_vsuxei16_v_bf16mf4(__bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16mf2(__bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16m1(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vsuxei16_v_bf16m2(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vsuxei16_v_bf16m4(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vsuxei16_v_bf16m8(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl); +// masked functions +void __riscv_vsoxei16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t vl); +---- + +[[unit-stride-fault-only-first-loads]] +==== Unit-stride Fault-Only-First Loads Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vle16ff_v_bf16mf4(const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_v_bf16mf2(const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_v_bf16m1(const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2_t __riscv_vle16ff_v_bf16m2(const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4_t __riscv_vle16ff_v_bf16m4(const __bf16 *rs1, size_t 
*new_vl, + size_t vl); +vbfloat16m8_t __riscv_vle16ff_v_bf16m8(const __bf16 *rs1, size_t *new_vl, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2_t __riscv_vle16ff_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1_t __riscv_vle16ff_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2_t __riscv_vle16ff_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4_t __riscv_vle16ff_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m8_t __riscv_vle16ff_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +---- + +=== BFloat16 Vector Loads and Stores Segment Intrinsics + +[[vector-unit-stride-segment-load]] +==== Vector Unit-Stride Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vlseg2e16_v_bf16mf4x2(const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_v_bf16mf4x3(const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_v_bf16mf4x4(const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_v_bf16mf4x5(const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_v_bf16mf4x6(const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_v_bf16mf4x7(const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_v_bf16mf4x8(const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_v_bf16mf2x2(const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_v_bf16mf2x3(const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_v_bf16mf2x4(const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_v_bf16mf2x5(const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_v_bf16mf2x6(const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_v_bf16mf2x7(const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_v_bf16mf2x8(const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_v_bf16m1x2(const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_v_bf16m1x3(const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_v_bf16m1x4(const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_v_bf16m1x5(const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_v_bf16m1x6(const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_v_bf16m1x7(const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_v_bf16m1x8(const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_v_bf16m2x2(const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_v_bf16m2x3(const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_v_bf16m2x4(const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_v_bf16m4x2(const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_v_bf16mf4x2(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_v_bf16mf4x3(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_v_bf16mf4x4(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_v_bf16mf4x5(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_v_bf16mf4x6(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_v_bf16mf4x7(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_v_bf16mf4x8(const __bf16 *rs1, + 
size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_v_bf16mf2x2(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_v_bf16mf2x3(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_v_bf16mf2x4(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_v_bf16mf2x5(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_v_bf16mf2x6(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_v_bf16mf2x7(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_v_bf16mf2x8(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_v_bf16m1x2(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_v_bf16m1x3(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_v_bf16m1x4(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_v_bf16m1x5(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_v_bf16m1x6(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_v_bf16m1x7(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_v_bf16m1x8(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_v_bf16m2x2(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_v_bf16m2x3(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_v_bf16m2x4(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_v_bf16m4x2(const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); 
+vbfloat16m1x6_t __riscv_vlseg6e16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_v_bf16m1x2_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_v_bf16m1x3_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_v_bf16m1x4_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_v_bf16m1x5_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_v_bf16m1x6_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_v_bf16m1x7_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_v_bf16m1x8_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); 
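+----
+
+As a short, non-normative illustration of the segment loads above: a minimal
+sketch that deinterleaves pairs of bf16 values with a two-field unit-stride
+segment load. The helper name `load_bf16_pairs` is hypothetical, and the
+sketch assumes the tuple-extraction intrinsic
+`__riscv_vget_v_bf16m1x2_bf16m1` from the miscellaneous utility intrinsics:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Deinterleave pairs {a0, b0, a1, b1, ...} into two bf16 vectors per strip.
+void load_bf16_pairs(const __bf16 *src, size_t avl) {
+  size_t vl = __riscv_vsetvl_e16m1(avl);
+  // A single two-field segment load fills both members of the tuple.
+  vbfloat16m1x2_t t = __riscv_vlseg2e16_v_bf16m1x2(src, vl);
+  vbfloat16m1_t a = __riscv_vget_v_bf16m1x2_bf16m1(t, 0);
+  vbfloat16m1_t b = __riscv_vget_v_bf16m1x2_bf16m1(t, 1);
+  (void)a; // consume a and b in real code
+  (void)b;
+}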
+----
+
+[[vector-unit-stride-segment-store]]
+==== Vector Unit-Stride Segment Store Intrinsics
+
+[,c]
+----
+void __riscv_vsseg2e16_v_bf16mf4x2(__bf16 *rs1, vbfloat16mf4x2_t vs3,
+                                   size_t vl);
+void __riscv_vsseg3e16_v_bf16mf4x3(__bf16 *rs1, vbfloat16mf4x3_t vs3,
+                                   size_t vl);
+void __riscv_vsseg4e16_v_bf16mf4x4(__bf16 *rs1, vbfloat16mf4x4_t vs3,
+                                   size_t vl);
+void __riscv_vsseg5e16_v_bf16mf4x5(__bf16 *rs1, vbfloat16mf4x5_t vs3,
+                                   size_t vl);
+void __riscv_vsseg6e16_v_bf16mf4x6(__bf16 *rs1, vbfloat16mf4x6_t vs3,
+                                   size_t vl);
+void __riscv_vsseg7e16_v_bf16mf4x7(__bf16 *rs1, vbfloat16mf4x7_t vs3,
+                                   size_t vl);
+void __riscv_vsseg8e16_v_bf16mf4x8(__bf16 *rs1, vbfloat16mf4x8_t vs3,
+                                   size_t vl);
+void __riscv_vsseg2e16_v_bf16mf2x2(__bf16 *rs1, vbfloat16mf2x2_t vs3,
+                                   size_t vl);
+void __riscv_vsseg3e16_v_bf16mf2x3(__bf16 *rs1, vbfloat16mf2x3_t vs3,
+                                   size_t vl);
+void __riscv_vsseg4e16_v_bf16mf2x4(__bf16 *rs1, vbfloat16mf2x4_t vs3,
+                                   size_t vl);
+void __riscv_vsseg5e16_v_bf16mf2x5(__bf16 *rs1, vbfloat16mf2x5_t vs3,
+                                   size_t vl);
+void __riscv_vsseg6e16_v_bf16mf2x6(__bf16 *rs1, vbfloat16mf2x6_t vs3,
+                                   size_t vl);
+void __riscv_vsseg7e16_v_bf16mf2x7(__bf16 *rs1, vbfloat16mf2x7_t vs3,
+                                   size_t vl);
+void __riscv_vsseg8e16_v_bf16mf2x8(__bf16 *rs1, vbfloat16mf2x8_t vs3,
+                                   size_t vl);
+void __riscv_vsseg2e16_v_bf16m1x2(__bf16 *rs1, vbfloat16m1x2_t vs3, size_t vl);
+void __riscv_vsseg3e16_v_bf16m1x3(__bf16 *rs1, vbfloat16m1x3_t vs3, size_t vl);
+void __riscv_vsseg4e16_v_bf16m1x4(__bf16 *rs1, vbfloat16m1x4_t vs3, size_t vl);
+void __riscv_vsseg5e16_v_bf16m1x5(__bf16 *rs1, vbfloat16m1x5_t vs3, size_t vl);
+void __riscv_vsseg6e16_v_bf16m1x6(__bf16 *rs1, vbfloat16m1x6_t vs3, size_t vl);
+void __riscv_vsseg7e16_v_bf16m1x7(__bf16 *rs1, vbfloat16m1x7_t vs3, size_t vl);
+void __riscv_vsseg8e16_v_bf16m1x8(__bf16 *rs1, vbfloat16m1x8_t vs3, size_t vl);
+void __riscv_vsseg2e16_v_bf16m2x2(__bf16 *rs1, vbfloat16m2x2_t vs3, size_t vl);
+void __riscv_vsseg3e16_v_bf16m2x3(__bf16 *rs1, vbfloat16m2x3_t vs3, size_t vl);
+void __riscv_vsseg4e16_v_bf16m2x4(__bf16 *rs1, vbfloat16m2x4_t vs3, size_t vl);
+void __riscv_vsseg2e16_v_bf16m4x2(__bf16 *rs1, vbfloat16m4x2_t vs3, size_t vl);
+// masked functions
+void __riscv_vsseg2e16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1,
+                                     vbfloat16mf4x2_t vs3, size_t vl);
+void __riscv_vsseg3e16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1,
+                                     vbfloat16mf4x3_t vs3, size_t vl);
+void __riscv_vsseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1,
+                                     vbfloat16mf4x4_t vs3, size_t vl);
+void __riscv_vsseg5e16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1,
+                                     vbfloat16mf4x5_t vs3, size_t vl);
+void __riscv_vsseg6e16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1,
+                                     vbfloat16mf4x6_t vs3, size_t vl);
+void __riscv_vsseg7e16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1,
+                                     vbfloat16mf4x7_t vs3, size_t vl);
+void __riscv_vsseg8e16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1,
+                                     vbfloat16mf4x8_t vs3, size_t vl);
+void __riscv_vsseg2e16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1,
+                                     vbfloat16mf2x2_t vs3, size_t vl);
+void __riscv_vsseg3e16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1,
+                                     vbfloat16mf2x3_t vs3, size_t vl);
+void __riscv_vsseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1,
+                                     vbfloat16mf2x4_t vs3, size_t vl);
+void __riscv_vsseg5e16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1,
+                                     vbfloat16mf2x5_t vs3, size_t vl);
+void __riscv_vsseg6e16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1,
+                                     vbfloat16mf2x6_t vs3, size_t vl);
+void __riscv_vsseg7e16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1,
+                                     vbfloat16mf2x7_t vs3, size_t vl);
+void
__riscv_vsseg8e16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vsseg2e16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsseg3e16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsseg5e16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsseg6e16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsseg7e16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsseg8e16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsseg2e16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsseg3e16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vsseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsseg2e16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, + vbfloat16m4x2_t vs3, size_t vl); +---- + +[[vector-strided-segment-load]] +==== Vector Strided Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vlsseg2e16_v_bf16mf4x2(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_v_bf16mf4x3(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_v_bf16mf4x4(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_v_bf16mf4x5(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_v_bf16mf4x6(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_v_bf16mf4x7(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_v_bf16mf4x8(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_v_bf16mf2x2(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_v_bf16mf2x3(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_v_bf16mf2x4(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_v_bf16mf2x5(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_v_bf16mf2x6(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_v_bf16mf2x7(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_v_bf16mf2x8(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_v_bf16m1x2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_v_bf16m1x3(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_v_bf16m1x4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_v_bf16m1x5(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_v_bf16m1x6(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_v_bf16m1x7(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_v_bf16m1x8(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_v_bf16m2x2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_v_bf16m2x3(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_v_bf16m2x4(const __bf16 
*rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_v_bf16m4x2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +---- + +[[vector-strided-segment-store]] +==== Vector Strided Segment Store Intrinsics + +[,c] +---- +void __riscv_vssseg2e16_v_bf16mf4x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16mf4x3(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16mf4x4(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vssseg5e16_v_bf16mf4x5(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vssseg6e16_v_bf16mf4x6(__bf16 *rs1, ptrdiff_t rs2, + 
vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vssseg7e16_v_bf16mf4x7(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vssseg8e16_v_bf16mf4x8(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16mf2x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16mf2x3(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16mf2x4(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vssseg5e16_v_bf16mf2x5(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vssseg6e16_v_bf16mf2x6(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vssseg7e16_v_bf16mf2x7(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vssseg8e16_v_bf16mf2x8(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16m1x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16m1x3(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16m1x4(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vssseg5e16_v_bf16m1x5(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vssseg6e16_v_bf16m1x6(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vssseg7e16_v_bf16m1x7(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vssseg8e16_v_bf16m1x8(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16m2x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16m2x3(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16m2x4(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16m4x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m4x2_t vs3, size_t vl); +// masked functions +void __riscv_vssseg2e16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vssseg5e16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vssseg6e16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vssseg7e16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vssseg8e16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vssseg5e16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vssseg6e16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vssseg7e16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + 
vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vssseg8e16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vssseg5e16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vssseg6e16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vssseg7e16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vssseg8e16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m4x2_t vs3, size_t vl); +---- + +[[vector-indexed-segment-load]] +==== Vector Indexed Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t 
__riscv_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t 
__riscv_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_v_bf16m1x5_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_v_bf16m1x6_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_v_bf16m1x7_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_v_bf16m1x8_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_v_bf16m2x3_m(vbool8_t vm, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_v_bf16m2x4_m(vbool8_t vm, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_v_bf16m4x2_m(vbool4_t vm, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + 
vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_v_bf16m1x5_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_v_bf16m1x6_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_v_bf16m1x7_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_v_bf16m1x8_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_v_bf16m2x3_m(vbool8_t vm, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_v_bf16m2x4_m(vbool8_t vm, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_v_bf16m4x2_m(vbool4_t vm, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +---- + +[[vector-indexed-segment-store]] +==== Vector Indexed Segment Store Intrinsics + +[,c] +---- +void __riscv_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl); +void 
__riscv_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl); +void __riscv_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl); +void 
__riscv_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl); +// masked functions +void __riscv_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x5_t vs3, + size_t vl); +void __riscv_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x4_t vs3, + size_t vl); +void __riscv_vsoxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x5_t vs3, + size_t vl); +void __riscv_vsoxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x6_t vs3, + size_t vl); +void __riscv_vsoxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x7_t vs3, + size_t vl); +void __riscv_vsoxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x8_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, + vuint16m2_t vs2, vbfloat16m2x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, + vuint16m2_t vs2, vbfloat16m2x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, + vuint16m2_t vs2, vbfloat16m2x4_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, + vuint16m4_t vs2, vbfloat16m4x2_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl); +void 
__riscv_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x5_t vs3, + size_t vl); +void __riscv_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x4_t vs3, + size_t vl); +void __riscv_vsuxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x5_t vs3, + size_t vl); +void __riscv_vsuxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x6_t vs3, + size_t vl); +void __riscv_vsuxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x7_t vs3, + size_t vl); +void __riscv_vsuxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x8_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, + vuint16m2_t vs2, vbfloat16m2x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, + vuint16m2_t vs2, vbfloat16m2x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, + vuint16m2_t vs2, vbfloat16m2x4_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, + vuint16m4_t vs2, vbfloat16m4x2_t vs3, + size_t vl); +---- + +=== BFloat16 Convert Intrinsics + +[[bf16-vector-narrow-convert]] +==== Vector Narrowing Convert Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_m(vbool64_t vm, + vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_m(vbool32_t vm, + vfloat32m1_t vs2, size_t vl); 
+vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl); +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, + unsigned int frm, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, + unsigned int frm, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_rm(vfloat32m2_t vs2, + unsigned int frm, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_rm(vfloat32m4_t vs2, + unsigned int frm, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_rm(vfloat32m8_t vs2, + unsigned int frm, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_m(vbool64_t vm, + vfloat32mf2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_m(vbool32_t vm, + vfloat32m1_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_rm_m(vbool16_t vm, + vfloat32m2_t vs2, + unsigned int frm, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_rm_m(vbool8_t vm, + vfloat32m4_t vs2, + unsigned int frm, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_rm_m(vbool4_t vm, + vfloat32m8_t vs2, + unsigned int frm, size_t vl); +---- + +[[bf16-vector-widening-convert]] +==== Vector Widening Convert Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwcvtbf16_f_f_v_f32mf2(vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_f_v_f32m1(vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_f_v_f32m2(vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_f_v_f32m4(vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_f_v_f32m8(vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_f_v_f32mf2_m(vbool64_t vm, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_f_v_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_f_v_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_f_v_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_f_v_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl); +---- + +=== BFloat16 Arithmetic Intrinsics + +[[bf16-widening-multiply-accumulate]] +==== Vector Widening Multiply-Accumulate Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t 
__riscv_vfwmaccbf16_vf_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_rm_m(vbool16_t 
vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +---- + +[[vector-bf16-move]] +==== Vector BFloat16 Move Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmv_v_v_bf16mf4(vbfloat16mf4_t vs1, size_t vl); +vbfloat16mf2_t __riscv_vmv_v_v_bf16mf2(vbfloat16mf2_t vs1, size_t vl); +vbfloat16m1_t __riscv_vmv_v_v_bf16m1(vbfloat16m1_t vs1, size_t vl); +vbfloat16m2_t __riscv_vmv_v_v_bf16m2(vbfloat16m2_t vs1, size_t vl); +vbfloat16m4_t __riscv_vmv_v_v_bf16m4(vbfloat16m4_t vs1, size_t vl); +vbfloat16m8_t __riscv_vmv_v_v_bf16m8(vbfloat16m8_t vs1, size_t vl); +---- + +[[vector-bf16-merge]] +==== Vector BFloat16 Merge Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmerge_vvm_bf16mf4(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, vbool64_t v0, + size_t vl); +vbfloat16mf2_t __riscv_vmerge_vvm_bf16mf2(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, vbool32_t v0, + size_t vl); +vbfloat16m1_t __riscv_vmerge_vvm_bf16m1(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + vbool16_t v0, size_t vl); +vbfloat16m2_t __riscv_vmerge_vvm_bf16m2(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + vbool8_t v0, size_t vl); +vbfloat16m4_t __riscv_vmerge_vvm_bf16m4(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + vbool4_t v0, size_t vl); +vbfloat16m8_t __riscv_vmerge_vvm_bf16m8(vbfloat16m8_t vs2, vbfloat16m8_t vs1, + vbool2_t v0, size_t vl); +---- + +=== BFloat16 Miscellaneous Vector Utility Intrinsics + +[[reinterpret-cast-conversion]] +==== Reinterpret Cast Conversion Intrinsics + +[,c] +---- +// Reinterpret between different type under the same SEW/LMUL +vbfloat16mf4_t __riscv_vreinterpret_v_i16mf4_bf16mf4(vint16mf4_t src); +vbfloat16mf2_t __riscv_vreinterpret_v_i16mf2_bf16mf2(vint16mf2_t src); +vbfloat16m1_t __riscv_vreinterpret_v_i16m1_bf16m1(vint16m1_t src); +vbfloat16m2_t __riscv_vreinterpret_v_i16m2_bf16m2(vint16m2_t src); +vbfloat16m4_t __riscv_vreinterpret_v_i16m4_bf16m4(vint16m4_t src); +vbfloat16m8_t __riscv_vreinterpret_v_i16m8_bf16m8(vint16m8_t src); +vbfloat16mf4_t __riscv_vreinterpret_v_u16mf4_bf16mf4(vuint16mf4_t src); +vbfloat16mf2_t __riscv_vreinterpret_v_u16mf2_bf16mf2(vuint16mf2_t src); +vbfloat16m1_t __riscv_vreinterpret_v_u16m1_bf16m1(vuint16m1_t src); +vbfloat16m2_t __riscv_vreinterpret_v_u16m2_bf16m2(vuint16m2_t src); +vbfloat16m4_t __riscv_vreinterpret_v_u16m4_bf16m4(vuint16m4_t src); +vbfloat16m8_t __riscv_vreinterpret_v_u16m8_bf16m8(vuint16m8_t src); +vint16mf4_t __riscv_vreinterpret_v_bf16mf4_i16mf4(vbfloat16mf4_t src); +vint16mf2_t __riscv_vreinterpret_v_bf16mf2_i16mf2(vbfloat16mf2_t src); +vint16m1_t __riscv_vreinterpret_v_bf16m1_i16m1(vbfloat16m1_t src); +vint16m2_t __riscv_vreinterpret_v_bf16m2_i16m2(vbfloat16m2_t src); +vint16m4_t __riscv_vreinterpret_v_bf16m4_i16m4(vbfloat16m4_t src); +vint16m8_t __riscv_vreinterpret_v_bf16m8_i16m8(vbfloat16m8_t src); +vuint16mf4_t __riscv_vreinterpret_v_bf16mf4_u16mf4(vbfloat16mf4_t src); +vuint16mf2_t __riscv_vreinterpret_v_bf16mf2_u16mf2(vbfloat16mf2_t src); +vuint16m1_t 
__riscv_vreinterpret_v_bf16m1_u16m1(vbfloat16m1_t src); +vuint16m2_t __riscv_vreinterpret_v_bf16m2_u16m2(vbfloat16m2_t src); +vuint16m4_t __riscv_vreinterpret_v_bf16m4_u16m4(vbfloat16m4_t src); +vuint16m8_t __riscv_vreinterpret_v_bf16m8_u16m8(vbfloat16m8_t src); +---- + +[[vector-lmul-extension]] +==== Vector LMUL Extension Intrinsics + +[,c] +---- +vbfloat16mf2_t __riscv_vlmul_ext_v_bf16mf4_bf16mf2(vbfloat16mf4_t value); +vbfloat16m1_t __riscv_vlmul_ext_v_bf16mf4_bf16m1(vbfloat16mf4_t value); +vbfloat16m2_t __riscv_vlmul_ext_v_bf16mf4_bf16m2(vbfloat16mf4_t value); +vbfloat16m4_t __riscv_vlmul_ext_v_bf16mf4_bf16m4(vbfloat16mf4_t value); +vbfloat16m8_t __riscv_vlmul_ext_v_bf16mf4_bf16m8(vbfloat16mf4_t value); +vbfloat16m1_t __riscv_vlmul_ext_v_bf16mf2_bf16m1(vbfloat16mf2_t value); +vbfloat16m2_t __riscv_vlmul_ext_v_bf16mf2_bf16m2(vbfloat16mf2_t value); +vbfloat16m4_t __riscv_vlmul_ext_v_bf16mf2_bf16m4(vbfloat16mf2_t value); +vbfloat16m8_t __riscv_vlmul_ext_v_bf16mf2_bf16m8(vbfloat16mf2_t value); +vbfloat16m2_t __riscv_vlmul_ext_v_bf16m1_bf16m2(vbfloat16m1_t value); +vbfloat16m4_t __riscv_vlmul_ext_v_bf16m1_bf16m4(vbfloat16m1_t value); +vbfloat16m8_t __riscv_vlmul_ext_v_bf16m1_bf16m8(vbfloat16m1_t value); +vbfloat16m4_t __riscv_vlmul_ext_v_bf16m2_bf16m4(vbfloat16m2_t value); +vbfloat16m8_t __riscv_vlmul_ext_v_bf16m2_bf16m8(vbfloat16m2_t value); +vbfloat16m8_t __riscv_vlmul_ext_v_bf16m4_bf16m8(vbfloat16m4_t value); +---- + +[[vector-lmul-truncation]] +==== Vector LMUL Truncation Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vlmul_trunc_v_bf16mf2_bf16mf4(vbfloat16mf2_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_v_bf16m1_bf16mf4(vbfloat16m1_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_v_bf16m1_bf16mf2(vbfloat16m1_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_v_bf16m2_bf16mf4(vbfloat16m2_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_v_bf16m2_bf16mf2(vbfloat16m2_t value); +vbfloat16m1_t __riscv_vlmul_trunc_v_bf16m2_bf16m1(vbfloat16m2_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_v_bf16m4_bf16mf4(vbfloat16m4_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_v_bf16m4_bf16mf2(vbfloat16m4_t value); +vbfloat16m1_t __riscv_vlmul_trunc_v_bf16m4_bf16m1(vbfloat16m4_t value); +vbfloat16m2_t __riscv_vlmul_trunc_v_bf16m4_bf16m2(vbfloat16m4_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_v_bf16m8_bf16mf4(vbfloat16m8_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_v_bf16m8_bf16mf2(vbfloat16m8_t value); +vbfloat16m1_t __riscv_vlmul_trunc_v_bf16m8_bf16m1(vbfloat16m8_t value); +vbfloat16m2_t __riscv_vlmul_trunc_v_bf16m8_bf16m2(vbfloat16m8_t value); +vbfloat16m4_t __riscv_vlmul_trunc_v_bf16m8_bf16m4(vbfloat16m8_t value); +---- + +[[vector-initialization]] +==== Vector Initialization Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vundefined_bf16mf4(); +vbfloat16mf2_t __riscv_vundefined_bf16mf2(); +vbfloat16m1_t __riscv_vundefined_bf16m1(); +vbfloat16m2_t __riscv_vundefined_bf16m2(); +vbfloat16m4_t __riscv_vundefined_bf16m4(); +vbfloat16m8_t __riscv_vundefined_bf16m8(); +vbfloat16mf4x2_t __riscv_vundefined_bf16mf4x2(); +vbfloat16mf4x3_t __riscv_vundefined_bf16mf4x3(); +vbfloat16mf4x4_t __riscv_vundefined_bf16mf4x4(); +vbfloat16mf4x5_t __riscv_vundefined_bf16mf4x5(); +vbfloat16mf4x6_t __riscv_vundefined_bf16mf4x6(); +vbfloat16mf4x7_t __riscv_vundefined_bf16mf4x7(); +vbfloat16mf4x8_t __riscv_vundefined_bf16mf4x8(); +vbfloat16mf2x2_t __riscv_vundefined_bf16mf2x2(); +vbfloat16mf2x3_t __riscv_vundefined_bf16mf2x3(); +vbfloat16mf2x4_t __riscv_vundefined_bf16mf2x4(); +vbfloat16mf2x5_t __riscv_vundefined_bf16mf2x5(); 
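// Editor's sketch (illustrative, not part of the generated listing):
// __riscv_vundefined_* returns a vector or tuple whose contents are
// unspecified; the idiomatic use is to seed a register group that is then
// defined piecewise. The helper name below is hypothetical, assumes
// <riscv_vector.h>, and uses the __riscv_vset_v_* intrinsics listed under
// "Vector Insertion Intrinsics" below to build an m2 group from two m1 halves.
static inline vbfloat16m2_t concat_bf16m1(vbfloat16m1_t lo, vbfloat16m1_t hi) {
  vbfloat16m2_t grp = __riscv_vundefined_bf16m2(); // contents unspecified
  grp = __riscv_vset_v_bf16m1_bf16m2(grp, 0, lo);  // define the low m1 half
  return __riscv_vset_v_bf16m1_bf16m2(grp, 1, hi); // define the high m1 half
}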
+vbfloat16mf2x6_t __riscv_vundefined_bf16mf2x6(); +vbfloat16mf2x7_t __riscv_vundefined_bf16mf2x7(); +vbfloat16mf2x8_t __riscv_vundefined_bf16mf2x8(); +vbfloat16m1x2_t __riscv_vundefined_bf16m1x2(); +vbfloat16m1x3_t __riscv_vundefined_bf16m1x3(); +vbfloat16m1x4_t __riscv_vundefined_bf16m1x4(); +vbfloat16m1x5_t __riscv_vundefined_bf16m1x5(); +vbfloat16m1x6_t __riscv_vundefined_bf16m1x6(); +vbfloat16m1x7_t __riscv_vundefined_bf16m1x7(); +vbfloat16m1x8_t __riscv_vundefined_bf16m1x8(); +vbfloat16m2x2_t __riscv_vundefined_bf16m2x2(); +vbfloat16m2x3_t __riscv_vundefined_bf16m2x3(); +vbfloat16m2x4_t __riscv_vundefined_bf16m2x4(); +vbfloat16m4x2_t __riscv_vundefined_bf16m4x2(); +---- + +[[vector-insertion]] +==== Vector Insertion Intrinsics + +[,c] +---- +vbfloat16m2_t __riscv_vset_v_bf16m1_bf16m2(vbfloat16m2_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m4_t __riscv_vset_v_bf16m1_bf16m4(vbfloat16m4_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m4_t __riscv_vset_v_bf16m2_bf16m4(vbfloat16m4_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m8_t __riscv_vset_v_bf16m1_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m8_t __riscv_vset_v_bf16m2_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m8_t __riscv_vset_v_bf16m4_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m4_t value); +vbfloat16mf4x2_t __riscv_vset_v_bf16mf4_bf16mf4x2(vbfloat16mf4x2_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x3_t __riscv_vset_v_bf16mf4_bf16mf4x3(vbfloat16mf4x3_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x4_t __riscv_vset_v_bf16mf4_bf16mf4x4(vbfloat16mf4x4_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x5_t __riscv_vset_v_bf16mf4_bf16mf4x5(vbfloat16mf4x5_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x6_t __riscv_vset_v_bf16mf4_bf16mf4x6(vbfloat16mf4x6_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x7_t __riscv_vset_v_bf16mf4_bf16mf4x7(vbfloat16mf4x7_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x8_t __riscv_vset_v_bf16mf4_bf16mf4x8(vbfloat16mf4x8_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf2x2_t __riscv_vset_v_bf16mf2_bf16mf2x2(vbfloat16mf2x2_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x3_t __riscv_vset_v_bf16mf2_bf16mf2x3(vbfloat16mf2x3_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x4_t __riscv_vset_v_bf16mf2_bf16mf2x4(vbfloat16mf2x4_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x5_t __riscv_vset_v_bf16mf2_bf16mf2x5(vbfloat16mf2x5_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x6_t __riscv_vset_v_bf16mf2_bf16mf2x6(vbfloat16mf2x6_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x7_t __riscv_vset_v_bf16mf2_bf16mf2x7(vbfloat16mf2x7_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x8_t __riscv_vset_v_bf16mf2_bf16mf2x8(vbfloat16mf2x8_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16m1x2_t __riscv_vset_v_bf16m1_bf16m1x2(vbfloat16m1x2_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m1x3_t __riscv_vset_v_bf16m1_bf16m1x3(vbfloat16m1x3_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m1x4_t __riscv_vset_v_bf16m1_bf16m1x4(vbfloat16m1x4_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m1x5_t __riscv_vset_v_bf16m1_bf16m1x5(vbfloat16m1x5_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m1x6_t __riscv_vset_v_bf16m1_bf16m1x6(vbfloat16m1x6_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m1x7_t 
__riscv_vset_v_bf16m1_bf16m1x7(vbfloat16m1x7_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m1x8_t __riscv_vset_v_bf16m1_bf16m1x8(vbfloat16m1x8_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m2x2_t __riscv_vset_v_bf16m2_bf16m2x2(vbfloat16m2x2_t dest, + size_t index, + vbfloat16m2_t value); +vbfloat16m2x3_t __riscv_vset_v_bf16m2_bf16m2x3(vbfloat16m2x3_t dest, + size_t index, + vbfloat16m2_t value); +vbfloat16m2x4_t __riscv_vset_v_bf16m2_bf16m2x4(vbfloat16m2x4_t dest, + size_t index, + vbfloat16m2_t value); +vbfloat16m4x2_t __riscv_vset_v_bf16m4_bf16m4x2(vbfloat16m4x2_t dest, + size_t index, + vbfloat16m4_t value); +---- + +[[vector-extraction]] +==== Vector Extraction Intrinsics + +[,c] +---- +vbfloat16m1_t __riscv_vget_v_bf16m2_bf16m1(vbfloat16m2_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m4_bf16m1(vbfloat16m4_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m8_bf16m1(vbfloat16m8_t src, size_t index); +vbfloat16m2_t __riscv_vget_v_bf16m4_bf16m2(vbfloat16m4_t src, size_t index); +vbfloat16m2_t __riscv_vget_v_bf16m8_bf16m2(vbfloat16m8_t src, size_t index); +vbfloat16m4_t __riscv_vget_v_bf16m8_bf16m4(vbfloat16m8_t src, size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x2_bf16mf4(vbfloat16mf4x2_t src, + size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x3_bf16mf4(vbfloat16mf4x3_t src, + size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x4_bf16mf4(vbfloat16mf4x4_t src, + size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x5_bf16mf4(vbfloat16mf4x5_t src, + size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x6_bf16mf4(vbfloat16mf4x6_t src, + size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x7_bf16mf4(vbfloat16mf4x7_t src, + size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x8_bf16mf4(vbfloat16mf4x8_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x2_bf16mf2(vbfloat16mf2x2_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x3_bf16mf2(vbfloat16mf2x3_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x4_bf16mf2(vbfloat16mf2x4_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x5_bf16mf2(vbfloat16mf2x5_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x6_bf16mf2(vbfloat16mf2x6_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x7_bf16mf2(vbfloat16mf2x7_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x8_bf16mf2(vbfloat16mf2x8_t src, + size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x2_bf16m1(vbfloat16m1x2_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x3_bf16m1(vbfloat16m1x3_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x4_bf16m1(vbfloat16m1x4_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x5_bf16m1(vbfloat16m1x5_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x6_bf16m1(vbfloat16m1x6_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x7_bf16m1(vbfloat16m1x7_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x8_bf16m1(vbfloat16m1x8_t src, size_t index); +vbfloat16m2_t __riscv_vget_v_bf16m2x2_bf16m2(vbfloat16m2x2_t src, size_t index); +vbfloat16m2_t __riscv_vget_v_bf16m2x3_bf16m2(vbfloat16m2x3_t src, size_t index); +vbfloat16m2_t __riscv_vget_v_bf16m2x4_bf16m2(vbfloat16m2x4_t src, size_t index); +vbfloat16m4_t __riscv_vget_v_bf16m4x2_bf16m4(vbfloat16m4x2_t src, size_t index); +---- + +[[vector-creation]] +==== Vector Creation Intrinsics + +[,c] +---- +vbfloat16m2_t __riscv_vcreate_v_bf16m1_bf16m2(vbfloat16m1_t v0, + vbfloat16m1_t v1); +vbfloat16m4_t __riscv_vcreate_v_bf16m1_bf16m4(vbfloat16m1_t v0, + 
vbfloat16m1_t v1, + vbfloat16m1_t v2, + vbfloat16m1_t v3); +vbfloat16m8_t __riscv_vcreate_v_bf16m1_bf16m8( + vbfloat16m1_t v0, vbfloat16m1_t v1, vbfloat16m1_t v2, vbfloat16m1_t v3, + vbfloat16m1_t v4, vbfloat16m1_t v5, vbfloat16m1_t v6, vbfloat16m1_t v7); +vbfloat16m4_t __riscv_vcreate_v_bf16m2_bf16m4(vbfloat16m2_t v0, + vbfloat16m2_t v1); +vbfloat16m8_t __riscv_vcreate_v_bf16m2_bf16m8(vbfloat16m2_t v0, + vbfloat16m2_t v1, + vbfloat16m2_t v2, + vbfloat16m2_t v3); +vbfloat16m8_t __riscv_vcreate_v_bf16m4_bf16m8(vbfloat16m4_t v0, + vbfloat16m4_t v1); +vbfloat16mf4x2_t __riscv_vcreate_v_bf16mf4x2(vbfloat16mf4_t v0, + vbfloat16mf4_t v1); +vbfloat16mf4x3_t __riscv_vcreate_v_bf16mf4x3(vbfloat16mf4_t v0, + vbfloat16mf4_t v1, + vbfloat16mf4_t v2); +vbfloat16mf4x4_t __riscv_vcreate_v_bf16mf4x4(vbfloat16mf4_t v0, + vbfloat16mf4_t v1, + vbfloat16mf4_t v2, + vbfloat16mf4_t v3); +vbfloat16mf4x5_t __riscv_vcreate_v_bf16mf4x5(vbfloat16mf4_t v0, + vbfloat16mf4_t v1, + vbfloat16mf4_t v2, + vbfloat16mf4_t v3, + vbfloat16mf4_t v4); +vbfloat16mf4x6_t +__riscv_vcreate_v_bf16mf4x6(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4, vbfloat16mf4_t v5); +vbfloat16mf4x7_t __riscv_vcreate_v_bf16mf4x7( + vbfloat16mf4_t v0, vbfloat16mf4_t v1, vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4, vbfloat16mf4_t v5, vbfloat16mf4_t v6); +vbfloat16mf4x8_t __riscv_vcreate_v_bf16mf4x8( + vbfloat16mf4_t v0, vbfloat16mf4_t v1, vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4, vbfloat16mf4_t v5, vbfloat16mf4_t v6, vbfloat16mf4_t v7); +vbfloat16mf2x2_t __riscv_vcreate_v_bf16mf2x2(vbfloat16mf2_t v0, + vbfloat16mf2_t v1); +vbfloat16mf2x3_t __riscv_vcreate_v_bf16mf2x3(vbfloat16mf2_t v0, + vbfloat16mf2_t v1, + vbfloat16mf2_t v2); +vbfloat16mf2x4_t __riscv_vcreate_v_bf16mf2x4(vbfloat16mf2_t v0, + vbfloat16mf2_t v1, + vbfloat16mf2_t v2, + vbfloat16mf2_t v3); +vbfloat16mf2x5_t __riscv_vcreate_v_bf16mf2x5(vbfloat16mf2_t v0, + vbfloat16mf2_t v1, + vbfloat16mf2_t v2, + vbfloat16mf2_t v3, + vbfloat16mf2_t v4); +vbfloat16mf2x6_t +__riscv_vcreate_v_bf16mf2x6(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2, vbfloat16mf2_t v3, + vbfloat16mf2_t v4, vbfloat16mf2_t v5); +vbfloat16mf2x7_t __riscv_vcreate_v_bf16mf2x7( + vbfloat16mf2_t v0, vbfloat16mf2_t v1, vbfloat16mf2_t v2, vbfloat16mf2_t v3, + vbfloat16mf2_t v4, vbfloat16mf2_t v5, vbfloat16mf2_t v6); +vbfloat16mf2x8_t __riscv_vcreate_v_bf16mf2x8( + vbfloat16mf2_t v0, vbfloat16mf2_t v1, vbfloat16mf2_t v2, vbfloat16mf2_t v3, + vbfloat16mf2_t v4, vbfloat16mf2_t v5, vbfloat16mf2_t v6, vbfloat16mf2_t v7); +vbfloat16m1x2_t __riscv_vcreate_v_bf16m1x2(vbfloat16m1_t v0, vbfloat16m1_t v1); +vbfloat16m1x3_t __riscv_vcreate_v_bf16m1x3(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2); +vbfloat16m1x4_t __riscv_vcreate_v_bf16m1x4(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2, vbfloat16m1_t v3); +vbfloat16m1x5_t __riscv_vcreate_v_bf16m1x5(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2, vbfloat16m1_t v3, + vbfloat16m1_t v4); +vbfloat16m1x6_t __riscv_vcreate_v_bf16m1x6(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2, vbfloat16m1_t v3, + vbfloat16m1_t v4, vbfloat16m1_t v5); +vbfloat16m1x7_t __riscv_vcreate_v_bf16m1x7(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2, vbfloat16m1_t v3, + vbfloat16m1_t v4, vbfloat16m1_t v5, + vbfloat16m1_t v6); +vbfloat16m1x8_t __riscv_vcreate_v_bf16m1x8(vbfloat16m1_t v0, vbfloat16m1_t v1, + vbfloat16m1_t v2, vbfloat16m1_t v3, + vbfloat16m1_t v4, vbfloat16m1_t v5, + 
vbfloat16m1_t v6, vbfloat16m1_t v7); +vbfloat16m2x2_t __riscv_vcreate_v_bf16m2x2(vbfloat16m2_t v0, vbfloat16m2_t v1); +vbfloat16m2x3_t __riscv_vcreate_v_bf16m2x3(vbfloat16m2_t v0, vbfloat16m2_t v1, + vbfloat16m2_t v2); +vbfloat16m2x4_t __riscv_vcreate_v_bf16m2x4(vbfloat16m2_t v0, vbfloat16m2_t v1, + vbfloat16m2_t v2, vbfloat16m2_t v3); +vbfloat16m4x2_t __riscv_vcreate_v_bf16m4x2(vbfloat16m4_t v0, vbfloat16m4_t v1); +---- diff --git a/auto-generated/bfloat16/intrinsic_funcs/00_bfloat16_vector_loads_and_stores_intrinsics.adoc b/auto-generated/bfloat16/intrinsic_funcs/00_bfloat16_vector_loads_and_stores_intrinsics.adoc new file mode 100644 index 000000000..db9f6077c --- /dev/null +++ b/auto-generated/bfloat16/intrinsic_funcs/00_bfloat16_vector_loads_and_stores_intrinsics.adoc @@ -0,0 +1,262 @@ + +=== BFloat16 Vector Loads and Stores Intrinsics + +[[bf16-vector-unit-stride-load]] +==== Vector Unit-Stride Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vle16_v_bf16mf4(const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_v_bf16mf2(const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_v_bf16m1(const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_v_bf16m2(const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_v_bf16m4(const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_v_bf16m8(const __bf16 *rs1, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16mf2_t __riscv_vle16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1_t __riscv_vle16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m2_t __riscv_vle16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m4_t __riscv_vle16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m8_t __riscv_vle16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + size_t vl); +---- + +[[bf16-vector-unit-stride-store]] +==== Vector Unit-Stride Store Intrinsics + +[,c] +---- +void __riscv_vse16_v_bf16mf4(__bf16 *rs1, vbfloat16mf4_t vs3, size_t vl); +void __riscv_vse16_v_bf16mf2(__bf16 *rs1, vbfloat16mf2_t vs3, size_t vl); +void __riscv_vse16_v_bf16m1(__bf16 *rs1, vbfloat16m1_t vs3, size_t vl); +void __riscv_vse16_v_bf16m2(__bf16 *rs1, vbfloat16m2_t vs3, size_t vl); +void __riscv_vse16_v_bf16m4(__bf16 *rs1, vbfloat16m4_t vs3, size_t vl); +void __riscv_vse16_v_bf16m8(__bf16 *rs1, vbfloat16m8_t vs3, size_t vl); +// masked functions +void __riscv_vse16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vbfloat16mf4_t vs3, + size_t vl); +void __riscv_vse16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vbfloat16mf2_t vs3, + size_t vl); +void __riscv_vse16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vse16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vse16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vse16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vbfloat16m8_t vs3, + size_t vl); +---- + +[[vector-strided-load]] +==== Vector Strided Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vlse16_v_bf16mf4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vlse16_v_bf16mf2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vlse16_v_bf16m1(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vlse16_v_bf16m2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vlse16_v_bf16m4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m8_t 
__riscv_vlse16_v_bf16m8(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vlse16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1_t __riscv_vlse16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2_t __riscv_vlse16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4_t __riscv_vlse16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m8_t __riscv_vlse16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +---- + +[[vector-strided-store]] +==== Vector Strided Store Intrinsics + +[,c] +---- +void __riscv_vsse16_v_bf16mf4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4_t vs3, + size_t vl); +void __riscv_vsse16_v_bf16mf2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2_t vs3, + size_t vl); +void __riscv_vsse16_v_bf16m1(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vsse16_v_bf16m2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vsse16_v_bf16m4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vsse16_v_bf16m8(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m8_t vs3, + size_t vl); +// masked functions +void __riscv_vsse16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsse16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsse16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1_t vs3, size_t vl); +void __riscv_vsse16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2_t vs3, size_t vl); +void __riscv_vsse16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m4_t vs3, size_t vl); +void __riscv_vsse16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m8_t vs3, size_t vl); +---- + +[[vector-indexed-load]] +==== Vector Indexed Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vloxei16_v_bf16mf4(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vloxei16_v_bf16mf2(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vloxei16_v_bf16m1(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_v_bf16m2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_v_bf16m4(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_v_bf16m8(const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16_v_bf16mf4(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vluxei16_v_bf16mf2(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vluxei16_v_bf16m1(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_v_bf16m2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_v_bf16m4(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_v_bf16m8(const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); 
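// Editor's sketch (illustrative, not part of the generated listing): the
// vloxei16/vluxei16 forms gather bf16 elements using *byte* offsets held in
// an unsigned index vector (ordered and unordered, respectively). The helper
// name and parameters below are hypothetical and assume <riscv_vector.h>:
static inline vbfloat16m1_t gather_bf16(const __bf16 *base,
                                        const uint16_t *byte_offs, size_t vl) {
  vuint16m1_t idx = __riscv_vle16_v_u16m1(byte_offs, vl); // offsets in bytes
  return __riscv_vloxei16_v_bf16m1(base, idx, vl);        // ordered gather
}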
+vbfloat16m2_t __riscv_vloxei16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vloxei16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vloxei16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl); +vbfloat16mf4_t __riscv_vluxei16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vluxei16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vluxei16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vluxei16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl); +---- + +[[vector-indexed-store]] +==== Vector Indexed Store Intrinsics + +[,c] +---- +void __riscv_vsoxei16_v_bf16mf4(__bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16mf2(__bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16m1(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vsoxei16_v_bf16m2(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vsoxei16_v_bf16m4(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vsoxei16_v_bf16m8(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl); +void __riscv_vsuxei16_v_bf16mf4(__bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16mf2(__bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16m1(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vsuxei16_v_bf16m2(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vsuxei16_v_bf16m4(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vsuxei16_v_bf16m8(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl); +// masked functions +void __riscv_vsoxei16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl); +void __riscv_vsoxei16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl); +void __riscv_vsuxei16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t 
vl); +---- + +[[unit-stride-fault-only-first-loads]] +==== Unit-stride Fault-Only-First Loads Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vle16ff_v_bf16mf4(const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_v_bf16mf2(const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_v_bf16m1(const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2_t __riscv_vle16ff_v_bf16m2(const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4_t __riscv_vle16ff_v_bf16m4(const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m8_t __riscv_vle16ff_v_bf16m8(const __bf16 *rs1, size_t *new_vl, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2_t __riscv_vle16ff_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1_t __riscv_vle16ff_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2_t __riscv_vle16ff_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4_t __riscv_vle16ff_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m8_t __riscv_vle16ff_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +---- diff --git a/auto-generated/bfloat16/intrinsic_funcs/01_bfloat16_vector_loads_and_stores_segment_intrinsics.adoc b/auto-generated/bfloat16/intrinsic_funcs/01_bfloat16_vector_loads_and_stores_segment_intrinsics.adoc new file mode 100644 index 000000000..48e19775a --- /dev/null +++ b/auto-generated/bfloat16/intrinsic_funcs/01_bfloat16_vector_loads_and_stores_segment_intrinsics.adoc @@ -0,0 +1,1077 @@ + +=== BFloat16 Vector Loads and Stores Segment Intrinsics + +[[vector-unit-stride-segment-load]] +==== Vector Unit-Stride Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vlseg2e16_v_bf16mf4x2(const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_v_bf16mf4x3(const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_v_bf16mf4x4(const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_v_bf16mf4x5(const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_v_bf16mf4x6(const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_v_bf16mf4x7(const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_v_bf16mf4x8(const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_v_bf16mf2x2(const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_v_bf16mf2x3(const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_v_bf16mf2x4(const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_v_bf16mf2x5(const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_v_bf16mf2x6(const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_v_bf16mf2x7(const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_v_bf16mf2x8(const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_v_bf16m1x2(const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_v_bf16m1x3(const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_v_bf16m1x4(const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_v_bf16m1x5(const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_v_bf16m1x6(const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_v_bf16m1x7(const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_v_bf16m1x8(const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t 
__riscv_vlseg2e16_v_bf16m2x2(const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_v_bf16m2x3(const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_v_bf16m2x4(const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_v_bf16m4x2(const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_v_bf16mf4x2(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_v_bf16mf4x3(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_v_bf16mf4x4(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_v_bf16mf4x5(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_v_bf16mf4x6(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_v_bf16mf4x7(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_v_bf16mf4x8(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_v_bf16mf2x2(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_v_bf16mf2x3(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_v_bf16mf2x4(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_v_bf16mf2x5(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_v_bf16mf2x6(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_v_bf16mf2x7(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_v_bf16mf2x8(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_v_bf16m1x2(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_v_bf16m1x3(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_v_bf16m1x4(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_v_bf16m1x5(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_v_bf16m1x6(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_v_bf16m1x7(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_v_bf16m1x8(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_v_bf16m2x2(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_v_bf16m2x3(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_v_bf16m2x4(const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_v_bf16m4x2(const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); 
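// Editor's sketch (illustrative, not part of the generated listing): a seg2
// load deinterleaves two-field records in a single operation, and the
// __riscv_vget_v_* intrinsics split the resulting tuple into its fields. The
// helper below is hypothetical and assumes <riscv_vector.h>:
static inline vbfloat16m1_t even_elements_bf16(const __bf16 *interleaved,
                                               size_t vl) {
  vbfloat16m1x2_t seg = __riscv_vlseg2e16_v_bf16m1x2(interleaved, vl);
  return __riscv_vget_v_bf16m1x2_bf16m1(seg, 0); // field 0: elements 0,2,4,...
}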
+vbfloat16mf2x3_t __riscv_vlseg3e16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_v_bf16m1x2_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_v_bf16m1x3_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_v_bf16m1x4_m(vbool16_t vm, 
+ const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_v_bf16m1x5_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_v_bf16m1x6_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_v_bf16m1x7_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_v_bf16m1x8_m(vbool16_t vm, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +---- + +[[vector-unit-stride-segment-store]] +==== Vector Unit-Stride Segment Store Intrinsics + +[,c] +---- +void __riscv_vsseg2e16_v_bf16mf4x2(__bf16 *rs1, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vsseg3e16_v_bf16mf4x3(__bf16 *rs1, vbfloat16mf4x3_t vs3, + size_t vl); +void __riscv_vsseg4e16_v_bf16mf4x4(__bf16 *rs1, vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vsseg5e16_v_bf16mf4x5(__bf16 *rs1, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vsseg6e16_v_bf16mf4x6(__bf16 *rs1, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vsseg7e16_v_bf16mf4x7(__bf16 *rs1, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vsseg8e16_v_bf16mf4x8(__bf16 *rs1, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vsseg2e16_v_bf16mf2x2(__bf16 *rs1, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vsseg3e16_v_bf16mf2x3(__bf16 *rs1, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vsseg4e16_v_bf16mf2x4(__bf16 *rs1, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vsseg5e16_v_bf16mf2x5(__bf16 *rs1, vbfloat16mf2x5_t vs3, + size_t vl); +void __riscv_vsseg6e16_v_bf16mf2x6(__bf16 *rs1, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vsseg7e16_v_bf16mf2x7(__bf16 *rs1, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vsseg8e16_v_bf16mf2x8(__bf16 *rs1, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vsseg2e16_v_bf16m1x2(__bf16 *rs1, vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsseg3e16_v_bf16m1x3(__bf16 *rs1, vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsseg4e16_v_bf16m1x4(__bf16 *rs1, vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsseg5e16_v_bf16m1x5(__bf16 *rs1, vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsseg6e16_v_bf16m1x6(__bf16 *rs1, vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsseg7e16_v_bf16m1x7(__bf16 *rs1, vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsseg8e16_v_bf16m1x8(__bf16 *rs1, vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsseg2e16_v_bf16m2x2(__bf16 *rs1, vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsseg3e16_v_bf16m2x3(__bf16 *rs1, vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vsseg4e16_v_bf16m2x4(__bf16 *rs1, vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsseg2e16_v_bf16m4x2(__bf16 *rs1, vbfloat16m4x2_t vs3, size_t vl); +// masked functions +void __riscv_vsseg2e16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vsseg3e16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vsseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vsseg5e16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, + 
vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vsseg6e16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vsseg7e16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vsseg8e16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vsseg2e16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vsseg3e16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vsseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vsseg5e16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vsseg6e16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vsseg7e16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vsseg8e16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vsseg2e16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsseg3e16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsseg5e16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsseg6e16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsseg7e16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsseg8e16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsseg2e16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsseg3e16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vsseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsseg2e16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, + vbfloat16m4x2_t vs3, size_t vl); +---- + +[[vector-strided-segment-load]] +==== Vector Strided Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vlsseg2e16_v_bf16mf4x2(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_v_bf16mf4x3(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_v_bf16mf4x4(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_v_bf16mf4x5(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_v_bf16mf4x6(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_v_bf16mf4x7(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_v_bf16mf4x8(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_v_bf16mf2x2(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_v_bf16mf2x3(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_v_bf16mf2x4(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_v_bf16mf2x5(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_v_bf16mf2x6(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_v_bf16mf2x7(const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_v_bf16mf2x8(const __bf16 
*rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_v_bf16m1x2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_v_bf16m1x3(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_v_bf16m1x4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_v_bf16m1x5(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_v_bf16m1x6(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_v_bf16m1x7(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_v_bf16m1x8(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_v_bf16m2x2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_v_bf16m2x3(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_v_bf16m2x4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_v_bf16m4x2(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_v_bf16m2x2_m(vbool8_t vm, 
const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +---- + +[[vector-strided-segment-store]] +==== Vector Strided Segment Store Intrinsics + +[,c] +---- +void __riscv_vssseg2e16_v_bf16mf4x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16mf4x3(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16mf4x4(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vssseg5e16_v_bf16mf4x5(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vssseg6e16_v_bf16mf4x6(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vssseg7e16_v_bf16mf4x7(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vssseg8e16_v_bf16mf4x8(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16mf2x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16mf2x3(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16mf2x4(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vssseg5e16_v_bf16mf2x5(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vssseg6e16_v_bf16mf2x6(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vssseg7e16_v_bf16mf2x7(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vssseg8e16_v_bf16mf2x8(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16m1x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16m1x3(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16m1x4(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vssseg5e16_v_bf16m1x5(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vssseg6e16_v_bf16m1x6(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vssseg7e16_v_bf16m1x7(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vssseg8e16_v_bf16m1x8(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16m2x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16m2x3(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16m2x4(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16m4x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16m4x2_t vs3, size_t vl); +// masked functions +void __riscv_vssseg2e16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vssseg5e16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vssseg6e16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + 
vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vssseg7e16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vssseg8e16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vssseg5e16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vssseg6e16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vssseg7e16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vssseg8e16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vssseg5e16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vssseg6e16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vssseg7e16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vssseg8e16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vssseg3e16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vssseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vssseg2e16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m4x2_t vs3, size_t vl); +---- + +[[vector-indexed-segment-load]] +==== Vector Indexed Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, + vuint16mf2_t 
rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t 
__riscv_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_v_bf16m1x5_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_v_bf16m1x6_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_v_bf16m1x7_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_v_bf16m1x8_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_v_bf16m2x3_m(vbool8_t vm, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_v_bf16m2x4_m(vbool8_t vm, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_v_bf16m4x2_m(vbool4_t vm, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t 
__riscv_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_v_bf16m1x5_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_v_bf16m1x6_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_v_bf16m1x7_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_v_bf16m1x8_m(vbool16_t vm, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_v_bf16m2x3_m(vbool8_t vm, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_v_bf16m2x4_m(vbool8_t vm, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_v_bf16m4x2_m(vbool4_t vm, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +---- + +[[vector-indexed-segment-store]] +==== Vector Indexed Segment Store Intrinsics + +[,c] +---- +void __riscv_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, + 
vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl); +void __riscv_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, 
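+// Editor's sketch, not part of the generated listing: the vsoxseg/vsuxseg
+// stores in this section scatter a tuple back through the same style of
+// byte-offset index vector, e.g. writing a previously loaded pair out
+// unordered: __riscv_vsuxseg2ei16_v_bf16mf4x2(base, idx, pair, vl);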
vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl); +// masked functions +void __riscv_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x5_t vs3, + size_t vl); +void __riscv_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x4_t vs3, + size_t vl); +void __riscv_vsoxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x5_t vs3, + size_t vl); +void __riscv_vsoxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x6_t vs3, + size_t vl); +void __riscv_vsoxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x7_t vs3, + 
size_t vl); +void __riscv_vsoxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x8_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, + vuint16m2_t vs2, vbfloat16m2x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, + vuint16m2_t vs2, vbfloat16m2x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, + vuint16m2_t vs2, vbfloat16m2x4_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, + vuint16m4_t vs2, vbfloat16m4x2_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x5_t vs3, + size_t vl); +void __riscv_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x4_t vs3, + size_t vl); +void __riscv_vsuxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x5_t vs3, + size_t vl); +void __riscv_vsuxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x6_t vs3, + size_t vl); +void __riscv_vsuxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x7_t vs3, + size_t vl); +void __riscv_vsuxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, + vuint16m1_t vs2, vbfloat16m1x8_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, + vuint16m2_t vs2, vbfloat16m2x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, + vuint16m2_t vs2, vbfloat16m2x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, + vuint16m2_t vs2, vbfloat16m2x4_t vs3, + size_t 
vl); +void __riscv_vsuxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, + vuint16m4_t vs2, vbfloat16m4x2_t vs3, + size_t vl); +---- diff --git a/auto-generated/bfloat16/intrinsic_funcs/02_bfloat16_convert_intrinsics.adoc b/auto-generated/bfloat16/intrinsic_funcs/02_bfloat16_convert_intrinsics.adoc new file mode 100644 index 000000000..a6e7b0277 --- /dev/null +++ b/auto-generated/bfloat16/intrinsic_funcs/02_bfloat16_convert_intrinsics.adoc @@ -0,0 +1,76 @@ + +=== BFloat16 Convert Intrinsics + +[[bf16-vector-narrow-convert]] +==== Vector Narrowing Convert Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_m(vbool64_t vm, + vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_m(vbool32_t vm, + vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl); +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, + unsigned int frm, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, + unsigned int frm, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_rm(vfloat32m2_t vs2, + unsigned int frm, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_rm(vfloat32m4_t vs2, + unsigned int frm, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_rm(vfloat32m8_t vs2, + unsigned int frm, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_m(vbool64_t vm, + vfloat32mf2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_m(vbool32_t vm, + vfloat32m1_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_rm_m(vbool16_t vm, + vfloat32m2_t vs2, + unsigned int frm, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_rm_m(vbool8_t vm, + vfloat32m4_t vs2, + unsigned int frm, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_rm_m(vbool4_t vm, + vfloat32m8_t vs2, + unsigned int frm, size_t vl); +---- + +[[bf16-vector-widening-convert]] +==== Vector Widening Convert Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwcvtbf16_f_f_v_f32mf2(vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_f_v_f32m1(vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_f_v_f32m2(vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_f_v_f32m4(vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_f_v_f32m8(vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_f_v_f32mf2_m(vbool64_t vm, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_f_v_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_f_v_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_f_v_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_f_v_f32m8_m(vbool4_t vm, vbfloat16m4_t 
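+// Editor's sketch, not part of the generated listing: a bf16 round trip
+// through the two conversion directions. Widening bf16 to f32 is exact;
+// narrowing rounds, and the _rm variants take an explicit rounding mode:
+//   vfloat32m2_t f = __riscv_vfwcvtbf16_f_f_v_f32m2(v, vl);
+//   vbfloat16m1_t b = __riscv_vfncvtbf16_f_f_w_bf16m1_rm(f, __RISCV_FRM_RNE, vl);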
vs2, + size_t vl); +---- diff --git a/auto-generated/bfloat16/intrinsic_funcs/03_bfloat16_arithmetic_intrinsics.adoc b/auto-generated/bfloat16/intrinsic_funcs/03_bfloat16_arithmetic_intrinsics.adoc new file mode 100644 index 000000000..558919dae --- /dev/null +++ b/auto-generated/bfloat16/intrinsic_funcs/03_bfloat16_arithmetic_intrinsics.adoc @@ -0,0 +1,163 @@ + +=== BFloat16 Arithmetic Intrinsics + +[[bf16-widening-multiply-accumulate]] +==== Vector Widening Multiply-Accumulate Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + 
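+// Editor's sketch, not part of the generated listing: the usual pattern
+// keeps the accumulator in f32 across a loop while the operands stay in
+// bf16, e.g. acc = __riscv_vfwmaccbf16_vv_f32m1(acc, a, b, vl);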
unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +---- + +[[vector-bf16-move]] +==== Vector BFloat16 Move Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmv_v_v_bf16mf4(vbfloat16mf4_t vs1, size_t vl); +vbfloat16mf2_t __riscv_vmv_v_v_bf16mf2(vbfloat16mf2_t vs1, size_t vl); +vbfloat16m1_t __riscv_vmv_v_v_bf16m1(vbfloat16m1_t vs1, size_t vl); +vbfloat16m2_t __riscv_vmv_v_v_bf16m2(vbfloat16m2_t vs1, size_t vl); +vbfloat16m4_t __riscv_vmv_v_v_bf16m4(vbfloat16m4_t vs1, size_t vl); +vbfloat16m8_t __riscv_vmv_v_v_bf16m8(vbfloat16m8_t vs1, size_t vl); +---- + +[[vector-bf16-merge]] +==== Vector BFloat16 Merge Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmerge_vvm_bf16mf4(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, vbool64_t v0, + size_t vl); +vbfloat16mf2_t __riscv_vmerge_vvm_bf16mf2(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, vbool32_t v0, + size_t vl); +vbfloat16m1_t __riscv_vmerge_vvm_bf16m1(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + vbool16_t v0, size_t vl); +vbfloat16m2_t __riscv_vmerge_vvm_bf16m2(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + vbool8_t v0, size_t vl); +vbfloat16m4_t __riscv_vmerge_vvm_bf16m4(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + vbool4_t v0, size_t vl); +vbfloat16m8_t __riscv_vmerge_vvm_bf16m8(vbfloat16m8_t vs2, vbfloat16m8_t vs1, + vbool2_t v0, size_t vl); +---- diff --git a/auto-generated/bfloat16/intrinsic_funcs/04_bfloat16_miscellaneous_vector_utility_intrinsics.adoc 
b/auto-generated/bfloat16/intrinsic_funcs/04_bfloat16_miscellaneous_vector_utility_intrinsics.adoc
new file mode 100644
index 000000000..9843290f7
--- /dev/null
+++ b/auto-generated/bfloat16/intrinsic_funcs/04_bfloat16_miscellaneous_vector_utility_intrinsics.adoc
@@ -0,0 +1,359 @@
+
+=== BFloat16 Miscellaneous Vector Utility Intrinsics
+
+[[reinterpret-cast-conversion]]
+==== Reinterpret Cast Conversion Intrinsics
+
+[,c]
+----
+// Reinterpret between different types under the same SEW/LMUL
+vbfloat16mf4_t __riscv_vreinterpret_v_i16mf4_bf16mf4(vint16mf4_t src);
+vbfloat16mf2_t __riscv_vreinterpret_v_i16mf2_bf16mf2(vint16mf2_t src);
+vbfloat16m1_t __riscv_vreinterpret_v_i16m1_bf16m1(vint16m1_t src);
+vbfloat16m2_t __riscv_vreinterpret_v_i16m2_bf16m2(vint16m2_t src);
+vbfloat16m4_t __riscv_vreinterpret_v_i16m4_bf16m4(vint16m4_t src);
+vbfloat16m8_t __riscv_vreinterpret_v_i16m8_bf16m8(vint16m8_t src);
+vbfloat16mf4_t __riscv_vreinterpret_v_u16mf4_bf16mf4(vuint16mf4_t src);
+vbfloat16mf2_t __riscv_vreinterpret_v_u16mf2_bf16mf2(vuint16mf2_t src);
+vbfloat16m1_t __riscv_vreinterpret_v_u16m1_bf16m1(vuint16m1_t src);
+vbfloat16m2_t __riscv_vreinterpret_v_u16m2_bf16m2(vuint16m2_t src);
+vbfloat16m4_t __riscv_vreinterpret_v_u16m4_bf16m4(vuint16m4_t src);
+vbfloat16m8_t __riscv_vreinterpret_v_u16m8_bf16m8(vuint16m8_t src);
+vint16mf4_t __riscv_vreinterpret_v_bf16mf4_i16mf4(vbfloat16mf4_t src);
+vint16mf2_t __riscv_vreinterpret_v_bf16mf2_i16mf2(vbfloat16mf2_t src);
+vint16m1_t __riscv_vreinterpret_v_bf16m1_i16m1(vbfloat16m1_t src);
+vint16m2_t __riscv_vreinterpret_v_bf16m2_i16m2(vbfloat16m2_t src);
+vint16m4_t __riscv_vreinterpret_v_bf16m4_i16m4(vbfloat16m4_t src);
+vint16m8_t __riscv_vreinterpret_v_bf16m8_i16m8(vbfloat16m8_t src);
+vuint16mf4_t __riscv_vreinterpret_v_bf16mf4_u16mf4(vbfloat16mf4_t src);
+vuint16mf2_t __riscv_vreinterpret_v_bf16mf2_u16mf2(vbfloat16mf2_t src);
+vuint16m1_t __riscv_vreinterpret_v_bf16m1_u16m1(vbfloat16m1_t src);
+vuint16m2_t __riscv_vreinterpret_v_bf16m2_u16m2(vbfloat16m2_t src);
+vuint16m4_t __riscv_vreinterpret_v_bf16m4_u16m4(vbfloat16m4_t src);
+vuint16m8_t __riscv_vreinterpret_v_bf16m8_u16m8(vbfloat16m8_t src);
+----
+
+[[vector-lmul-extension]]
+==== Vector LMUL Extension Intrinsics
+
+[,c]
+----
+vbfloat16mf2_t __riscv_vlmul_ext_v_bf16mf4_bf16mf2(vbfloat16mf4_t value);
+vbfloat16m1_t __riscv_vlmul_ext_v_bf16mf4_bf16m1(vbfloat16mf4_t value);
+vbfloat16m2_t __riscv_vlmul_ext_v_bf16mf4_bf16m2(vbfloat16mf4_t value);
+vbfloat16m4_t __riscv_vlmul_ext_v_bf16mf4_bf16m4(vbfloat16mf4_t value);
+vbfloat16m8_t __riscv_vlmul_ext_v_bf16mf4_bf16m8(vbfloat16mf4_t value);
+vbfloat16m1_t __riscv_vlmul_ext_v_bf16mf2_bf16m1(vbfloat16mf2_t value);
+vbfloat16m2_t __riscv_vlmul_ext_v_bf16mf2_bf16m2(vbfloat16mf2_t value);
+vbfloat16m4_t __riscv_vlmul_ext_v_bf16mf2_bf16m4(vbfloat16mf2_t value);
+vbfloat16m8_t __riscv_vlmul_ext_v_bf16mf2_bf16m8(vbfloat16mf2_t value);
+vbfloat16m2_t __riscv_vlmul_ext_v_bf16m1_bf16m2(vbfloat16m1_t value);
+vbfloat16m4_t __riscv_vlmul_ext_v_bf16m1_bf16m4(vbfloat16m1_t value);
+vbfloat16m8_t __riscv_vlmul_ext_v_bf16m1_bf16m8(vbfloat16m1_t value);
+vbfloat16m4_t __riscv_vlmul_ext_v_bf16m2_bf16m4(vbfloat16m2_t value);
+vbfloat16m8_t __riscv_vlmul_ext_v_bf16m2_bf16m8(vbfloat16m2_t value);
+vbfloat16m8_t __riscv_vlmul_ext_v_bf16m4_bf16m8(vbfloat16m4_t value);
+----
+
+[[vector-lmul-truncation]]
+==== Vector LMUL Truncation Intrinsics
+
+[,c]
+----
+vbfloat16mf4_t __riscv_vlmul_trunc_v_bf16mf2_bf16mf4(vbfloat16mf2_t value);
+vbfloat16mf4_t
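+// Editor's sketch, not part of the generated listing: reinterpret and
+// vlmul_ext/vlmul_trunc only retype a register group; no element values
+// are converted. E.g. viewing bf16 bits as u16 and back, then widening
+// the group (the extended tail is unspecified):
+//   vuint16m1_t u = __riscv_vreinterpret_v_bf16m1_u16m1(v);
+//   vbfloat16m1_t b = __riscv_vreinterpret_v_u16m1_bf16m1(u);
+//   vbfloat16m2_t wide = __riscv_vlmul_ext_v_bf16m1_bf16m2(b);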
__riscv_vlmul_trunc_v_bf16m1_bf16mf4(vbfloat16m1_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_v_bf16m1_bf16mf2(vbfloat16m1_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_v_bf16m2_bf16mf4(vbfloat16m2_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_v_bf16m2_bf16mf2(vbfloat16m2_t value); +vbfloat16m1_t __riscv_vlmul_trunc_v_bf16m2_bf16m1(vbfloat16m2_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_v_bf16m4_bf16mf4(vbfloat16m4_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_v_bf16m4_bf16mf2(vbfloat16m4_t value); +vbfloat16m1_t __riscv_vlmul_trunc_v_bf16m4_bf16m1(vbfloat16m4_t value); +vbfloat16m2_t __riscv_vlmul_trunc_v_bf16m4_bf16m2(vbfloat16m4_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_v_bf16m8_bf16mf4(vbfloat16m8_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_v_bf16m8_bf16mf2(vbfloat16m8_t value); +vbfloat16m1_t __riscv_vlmul_trunc_v_bf16m8_bf16m1(vbfloat16m8_t value); +vbfloat16m2_t __riscv_vlmul_trunc_v_bf16m8_bf16m2(vbfloat16m8_t value); +vbfloat16m4_t __riscv_vlmul_trunc_v_bf16m8_bf16m4(vbfloat16m8_t value); +---- + +[[vector-initialization]] +==== Vector Initialization Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vundefined_bf16mf4(); +vbfloat16mf2_t __riscv_vundefined_bf16mf2(); +vbfloat16m1_t __riscv_vundefined_bf16m1(); +vbfloat16m2_t __riscv_vundefined_bf16m2(); +vbfloat16m4_t __riscv_vundefined_bf16m4(); +vbfloat16m8_t __riscv_vundefined_bf16m8(); +vbfloat16mf4x2_t __riscv_vundefined_bf16mf4x2(); +vbfloat16mf4x3_t __riscv_vundefined_bf16mf4x3(); +vbfloat16mf4x4_t __riscv_vundefined_bf16mf4x4(); +vbfloat16mf4x5_t __riscv_vundefined_bf16mf4x5(); +vbfloat16mf4x6_t __riscv_vundefined_bf16mf4x6(); +vbfloat16mf4x7_t __riscv_vundefined_bf16mf4x7(); +vbfloat16mf4x8_t __riscv_vundefined_bf16mf4x8(); +vbfloat16mf2x2_t __riscv_vundefined_bf16mf2x2(); +vbfloat16mf2x3_t __riscv_vundefined_bf16mf2x3(); +vbfloat16mf2x4_t __riscv_vundefined_bf16mf2x4(); +vbfloat16mf2x5_t __riscv_vundefined_bf16mf2x5(); +vbfloat16mf2x6_t __riscv_vundefined_bf16mf2x6(); +vbfloat16mf2x7_t __riscv_vundefined_bf16mf2x7(); +vbfloat16mf2x8_t __riscv_vundefined_bf16mf2x8(); +vbfloat16m1x2_t __riscv_vundefined_bf16m1x2(); +vbfloat16m1x3_t __riscv_vundefined_bf16m1x3(); +vbfloat16m1x4_t __riscv_vundefined_bf16m1x4(); +vbfloat16m1x5_t __riscv_vundefined_bf16m1x5(); +vbfloat16m1x6_t __riscv_vundefined_bf16m1x6(); +vbfloat16m1x7_t __riscv_vundefined_bf16m1x7(); +vbfloat16m1x8_t __riscv_vundefined_bf16m1x8(); +vbfloat16m2x2_t __riscv_vundefined_bf16m2x2(); +vbfloat16m2x3_t __riscv_vundefined_bf16m2x3(); +vbfloat16m2x4_t __riscv_vundefined_bf16m2x4(); +vbfloat16m4x2_t __riscv_vundefined_bf16m4x2(); +---- + +[[vector-insertion]] +==== Vector Insertion Intrinsics + +[,c] +---- +vbfloat16m2_t __riscv_vset_v_bf16m1_bf16m2(vbfloat16m2_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m4_t __riscv_vset_v_bf16m1_bf16m4(vbfloat16m4_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m4_t __riscv_vset_v_bf16m2_bf16m4(vbfloat16m4_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m8_t __riscv_vset_v_bf16m1_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m8_t __riscv_vset_v_bf16m2_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m8_t __riscv_vset_v_bf16m4_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m4_t value); +vbfloat16mf4x2_t __riscv_vset_v_bf16mf4_bf16mf4x2(vbfloat16mf4x2_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x3_t __riscv_vset_v_bf16mf4_bf16mf4x3(vbfloat16mf4x3_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x4_t 
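+// Editor's sketch, not part of the generated listing: vset pairs with
+// vundefined above to assemble a tuple one field at a time, e.g.:
+//   vbfloat16m1x2_t t = __riscv_vundefined_bf16m1x2();
+//   t = __riscv_vset_v_bf16m1_bf16m1x2(t, 0, v_even);
+//   t = __riscv_vset_v_bf16m1_bf16m1x2(t, 1, v_odd);
+// where v_even/v_odd are hypothetical vbfloat16m1_t values.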
__riscv_vset_v_bf16mf4_bf16mf4x4(vbfloat16mf4x4_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x5_t __riscv_vset_v_bf16mf4_bf16mf4x5(vbfloat16mf4x5_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x6_t __riscv_vset_v_bf16mf4_bf16mf4x6(vbfloat16mf4x6_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x7_t __riscv_vset_v_bf16mf4_bf16mf4x7(vbfloat16mf4x7_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x8_t __riscv_vset_v_bf16mf4_bf16mf4x8(vbfloat16mf4x8_t dest, + size_t index, + vbfloat16mf4_t value); +vbfloat16mf2x2_t __riscv_vset_v_bf16mf2_bf16mf2x2(vbfloat16mf2x2_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x3_t __riscv_vset_v_bf16mf2_bf16mf2x3(vbfloat16mf2x3_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x4_t __riscv_vset_v_bf16mf2_bf16mf2x4(vbfloat16mf2x4_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x5_t __riscv_vset_v_bf16mf2_bf16mf2x5(vbfloat16mf2x5_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x6_t __riscv_vset_v_bf16mf2_bf16mf2x6(vbfloat16mf2x6_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x7_t __riscv_vset_v_bf16mf2_bf16mf2x7(vbfloat16mf2x7_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x8_t __riscv_vset_v_bf16mf2_bf16mf2x8(vbfloat16mf2x8_t dest, + size_t index, + vbfloat16mf2_t value); +vbfloat16m1x2_t __riscv_vset_v_bf16m1_bf16m1x2(vbfloat16m1x2_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m1x3_t __riscv_vset_v_bf16m1_bf16m1x3(vbfloat16m1x3_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m1x4_t __riscv_vset_v_bf16m1_bf16m1x4(vbfloat16m1x4_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m1x5_t __riscv_vset_v_bf16m1_bf16m1x5(vbfloat16m1x5_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m1x6_t __riscv_vset_v_bf16m1_bf16m1x6(vbfloat16m1x6_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m1x7_t __riscv_vset_v_bf16m1_bf16m1x7(vbfloat16m1x7_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m1x8_t __riscv_vset_v_bf16m1_bf16m1x8(vbfloat16m1x8_t dest, + size_t index, + vbfloat16m1_t value); +vbfloat16m2x2_t __riscv_vset_v_bf16m2_bf16m2x2(vbfloat16m2x2_t dest, + size_t index, + vbfloat16m2_t value); +vbfloat16m2x3_t __riscv_vset_v_bf16m2_bf16m2x3(vbfloat16m2x3_t dest, + size_t index, + vbfloat16m2_t value); +vbfloat16m2x4_t __riscv_vset_v_bf16m2_bf16m2x4(vbfloat16m2x4_t dest, + size_t index, + vbfloat16m2_t value); +vbfloat16m4x2_t __riscv_vset_v_bf16m4_bf16m4x2(vbfloat16m4x2_t dest, + size_t index, + vbfloat16m4_t value); +---- + +[[vector-extraction]] +==== Vector Extraction Intrinsics + +[,c] +---- +vbfloat16m1_t __riscv_vget_v_bf16m2_bf16m1(vbfloat16m2_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m4_bf16m1(vbfloat16m4_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m8_bf16m1(vbfloat16m8_t src, size_t index); +vbfloat16m2_t __riscv_vget_v_bf16m4_bf16m2(vbfloat16m4_t src, size_t index); +vbfloat16m2_t __riscv_vget_v_bf16m8_bf16m2(vbfloat16m8_t src, size_t index); +vbfloat16m4_t __riscv_vget_v_bf16m8_bf16m4(vbfloat16m8_t src, size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x2_bf16mf4(vbfloat16mf4x2_t src, + size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x3_bf16mf4(vbfloat16mf4x3_t src, + size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x4_bf16mf4(vbfloat16mf4x4_t src, + size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x5_bf16mf4(vbfloat16mf4x5_t src, + size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x6_bf16mf4(vbfloat16mf4x6_t src, + size_t 
index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x7_bf16mf4(vbfloat16mf4x7_t src, + size_t index); +vbfloat16mf4_t __riscv_vget_v_bf16mf4x8_bf16mf4(vbfloat16mf4x8_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x2_bf16mf2(vbfloat16mf2x2_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x3_bf16mf2(vbfloat16mf2x3_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x4_bf16mf2(vbfloat16mf2x4_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x5_bf16mf2(vbfloat16mf2x5_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x6_bf16mf2(vbfloat16mf2x6_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x7_bf16mf2(vbfloat16mf2x7_t src, + size_t index); +vbfloat16mf2_t __riscv_vget_v_bf16mf2x8_bf16mf2(vbfloat16mf2x8_t src, + size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x2_bf16m1(vbfloat16m1x2_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x3_bf16m1(vbfloat16m1x3_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x4_bf16m1(vbfloat16m1x4_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x5_bf16m1(vbfloat16m1x5_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x6_bf16m1(vbfloat16m1x6_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x7_bf16m1(vbfloat16m1x7_t src, size_t index); +vbfloat16m1_t __riscv_vget_v_bf16m1x8_bf16m1(vbfloat16m1x8_t src, size_t index); +vbfloat16m2_t __riscv_vget_v_bf16m2x2_bf16m2(vbfloat16m2x2_t src, size_t index); +vbfloat16m2_t __riscv_vget_v_bf16m2x3_bf16m2(vbfloat16m2x3_t src, size_t index); +vbfloat16m2_t __riscv_vget_v_bf16m2x4_bf16m2(vbfloat16m2x4_t src, size_t index); +vbfloat16m4_t __riscv_vget_v_bf16m4x2_bf16m4(vbfloat16m4x2_t src, size_t index); +---- + +[[vector-creation]] +==== Vector Creation Intrinsics + +[,c] +---- +vbfloat16m2_t __riscv_vcreate_v_bf16m1_bf16m2(vbfloat16m1_t v0, + vbfloat16m1_t v1); +vbfloat16m4_t __riscv_vcreate_v_bf16m1_bf16m4(vbfloat16m1_t v0, + vbfloat16m1_t v1, + vbfloat16m1_t v2, + vbfloat16m1_t v3); +vbfloat16m8_t __riscv_vcreate_v_bf16m1_bf16m8( + vbfloat16m1_t v0, vbfloat16m1_t v1, vbfloat16m1_t v2, vbfloat16m1_t v3, + vbfloat16m1_t v4, vbfloat16m1_t v5, vbfloat16m1_t v6, vbfloat16m1_t v7); +vbfloat16m4_t __riscv_vcreate_v_bf16m2_bf16m4(vbfloat16m2_t v0, + vbfloat16m2_t v1); +vbfloat16m8_t __riscv_vcreate_v_bf16m2_bf16m8(vbfloat16m2_t v0, + vbfloat16m2_t v1, + vbfloat16m2_t v2, + vbfloat16m2_t v3); +vbfloat16m8_t __riscv_vcreate_v_bf16m4_bf16m8(vbfloat16m4_t v0, + vbfloat16m4_t v1); +vbfloat16mf4x2_t __riscv_vcreate_v_bf16mf4x2(vbfloat16mf4_t v0, + vbfloat16mf4_t v1); +vbfloat16mf4x3_t __riscv_vcreate_v_bf16mf4x3(vbfloat16mf4_t v0, + vbfloat16mf4_t v1, + vbfloat16mf4_t v2); +vbfloat16mf4x4_t __riscv_vcreate_v_bf16mf4x4(vbfloat16mf4_t v0, + vbfloat16mf4_t v1, + vbfloat16mf4_t v2, + vbfloat16mf4_t v3); +vbfloat16mf4x5_t __riscv_vcreate_v_bf16mf4x5(vbfloat16mf4_t v0, + vbfloat16mf4_t v1, + vbfloat16mf4_t v2, + vbfloat16mf4_t v3, + vbfloat16mf4_t v4); +vbfloat16mf4x6_t +__riscv_vcreate_v_bf16mf4x6(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4, vbfloat16mf4_t v5); +vbfloat16mf4x7_t __riscv_vcreate_v_bf16mf4x7( + vbfloat16mf4_t v0, vbfloat16mf4_t v1, vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4, vbfloat16mf4_t v5, vbfloat16mf4_t v6); +vbfloat16mf4x8_t __riscv_vcreate_v_bf16mf4x8( + vbfloat16mf4_t v0, vbfloat16mf4_t v1, vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4, vbfloat16mf4_t v5, vbfloat16mf4_t v6, vbfloat16mf4_t v7); +vbfloat16mf2x2_t 
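+// Editor's sketch, not part of the generated listing: vcreate builds the
+// same tuple in a single call and vget (above) reads one field back; the
+// index arguments must be compile-time constants:
+//   vbfloat16m1x2_t t = __riscv_vcreate_v_bf16m1x2(v_even, v_odd);
+//   vbfloat16m1_t even = __riscv_vget_v_bf16m1x2_bf16m1(t, 0);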
__riscv_vcreate_v_bf16mf2x2(vbfloat16mf2_t v0,
+                                            vbfloat16mf2_t v1);
+vbfloat16mf2x3_t __riscv_vcreate_v_bf16mf2x3(vbfloat16mf2_t v0,
+                                             vbfloat16mf2_t v1,
+                                             vbfloat16mf2_t v2);
+vbfloat16mf2x4_t __riscv_vcreate_v_bf16mf2x4(vbfloat16mf2_t v0,
+                                             vbfloat16mf2_t v1,
+                                             vbfloat16mf2_t v2,
+                                             vbfloat16mf2_t v3);
+vbfloat16mf2x5_t __riscv_vcreate_v_bf16mf2x5(vbfloat16mf2_t v0,
+                                             vbfloat16mf2_t v1,
+                                             vbfloat16mf2_t v2,
+                                             vbfloat16mf2_t v3,
+                                             vbfloat16mf2_t v4);
+vbfloat16mf2x6_t
+__riscv_vcreate_v_bf16mf2x6(vbfloat16mf2_t v0, vbfloat16mf2_t v1,
+                            vbfloat16mf2_t v2, vbfloat16mf2_t v3,
+                            vbfloat16mf2_t v4, vbfloat16mf2_t v5);
+vbfloat16mf2x7_t __riscv_vcreate_v_bf16mf2x7(
+    vbfloat16mf2_t v0, vbfloat16mf2_t v1, vbfloat16mf2_t v2, vbfloat16mf2_t v3,
+    vbfloat16mf2_t v4, vbfloat16mf2_t v5, vbfloat16mf2_t v6);
+vbfloat16mf2x8_t __riscv_vcreate_v_bf16mf2x8(
+    vbfloat16mf2_t v0, vbfloat16mf2_t v1, vbfloat16mf2_t v2, vbfloat16mf2_t v3,
+    vbfloat16mf2_t v4, vbfloat16mf2_t v5, vbfloat16mf2_t v6, vbfloat16mf2_t v7);
+vbfloat16m1x2_t __riscv_vcreate_v_bf16m1x2(vbfloat16m1_t v0, vbfloat16m1_t v1);
+vbfloat16m1x3_t __riscv_vcreate_v_bf16m1x3(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                           vbfloat16m1_t v2);
+vbfloat16m1x4_t __riscv_vcreate_v_bf16m1x4(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                           vbfloat16m1_t v2, vbfloat16m1_t v3);
+vbfloat16m1x5_t __riscv_vcreate_v_bf16m1x5(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                           vbfloat16m1_t v2, vbfloat16m1_t v3,
+                                           vbfloat16m1_t v4);
+vbfloat16m1x6_t __riscv_vcreate_v_bf16m1x6(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                           vbfloat16m1_t v2, vbfloat16m1_t v3,
+                                           vbfloat16m1_t v4, vbfloat16m1_t v5);
+vbfloat16m1x7_t __riscv_vcreate_v_bf16m1x7(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                           vbfloat16m1_t v2, vbfloat16m1_t v3,
+                                           vbfloat16m1_t v4, vbfloat16m1_t v5,
+                                           vbfloat16m1_t v6);
+vbfloat16m1x8_t __riscv_vcreate_v_bf16m1x8(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                           vbfloat16m1_t v2, vbfloat16m1_t v3,
+                                           vbfloat16m1_t v4, vbfloat16m1_t v5,
+                                           vbfloat16m1_t v6, vbfloat16m1_t v7);
+vbfloat16m2x2_t __riscv_vcreate_v_bf16m2x2(vbfloat16m2_t v0, vbfloat16m2_t v1);
+vbfloat16m2x3_t __riscv_vcreate_v_bf16m2x3(vbfloat16m2_t v0, vbfloat16m2_t v1,
+                                           vbfloat16m2_t v2);
+vbfloat16m2x4_t __riscv_vcreate_v_bf16m2x4(vbfloat16m2_t v0, vbfloat16m2_t v1,
+                                           vbfloat16m2_t v2, vbfloat16m2_t v3);
+vbfloat16m4x2_t __riscv_vcreate_v_bf16m4x2(vbfloat16m4_t v0, vbfloat16m4_t v1);
+----
diff --git a/auto-generated/bfloat16/llvm-api-tests/vcreate.c b/auto-generated/bfloat16/llvm-api-tests/vcreate.c
new file mode 100644
index 000000000..1817fe2fa
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vcreate.c
@@ -0,0 +1,183 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16m2_t test_vcreate_v_bf16m1_bf16m2(vbfloat16m1_t v0, vbfloat16m1_t v1) {
+  return __riscv_vcreate_v_bf16m1_bf16m2(v0, v1);
+}
+
+vbfloat16m4_t test_vcreate_v_bf16m1_bf16m4(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                           vbfloat16m1_t v2, vbfloat16m1_t v3) {
+  return __riscv_vcreate_v_bf16m1_bf16m4(v0, v1, v2, v3);
+}
+
+vbfloat16m8_t test_vcreate_v_bf16m1_bf16m8(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                           vbfloat16m1_t v2, vbfloat16m1_t v3,
+                                           vbfloat16m1_t v4, vbfloat16m1_t v5,
+                                           vbfloat16m1_t v6, vbfloat16m1_t v7) {
+  return __riscv_vcreate_v_bf16m1_bf16m8(v0, v1, v2, v3, v4, v5, v6, v7);
+}
+
+vbfloat16m4_t
test_vcreate_v_bf16m2_bf16m4(vbfloat16m2_t v0, vbfloat16m2_t v1) { + return __riscv_vcreate_v_bf16m2_bf16m4(v0, v1); +} + +vbfloat16m8_t test_vcreate_v_bf16m2_bf16m8(vbfloat16m2_t v0, vbfloat16m2_t v1, + vbfloat16m2_t v2, vbfloat16m2_t v3) { + return __riscv_vcreate_v_bf16m2_bf16m8(v0, v1, v2, v3); +} + +vbfloat16m8_t test_vcreate_v_bf16m4_bf16m8(vbfloat16m4_t v0, vbfloat16m4_t v1) { + return __riscv_vcreate_v_bf16m4_bf16m8(v0, v1); +} + +vbfloat16mf4x2_t test_vcreate_v_bf16mf4x2(vbfloat16mf4_t v0, + vbfloat16mf4_t v1) { + return __riscv_vcreate_v_bf16mf4x2(v0, v1); +} + +vbfloat16mf4x3_t test_vcreate_v_bf16mf4x3(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2) { + return __riscv_vcreate_v_bf16mf4x3(v0, v1, v2); +} + +vbfloat16mf4x4_t test_vcreate_v_bf16mf4x4(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2, + vbfloat16mf4_t v3) { + return __riscv_vcreate_v_bf16mf4x4(v0, v1, v2, v3); +} + +vbfloat16mf4x5_t test_vcreate_v_bf16mf4x5(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4) { + return __riscv_vcreate_v_bf16mf4x5(v0, v1, v2, v3, v4); +} + +vbfloat16mf4x6_t test_vcreate_v_bf16mf4x6(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4, + vbfloat16mf4_t v5) { + return __riscv_vcreate_v_bf16mf4x6(v0, v1, v2, v3, v4, v5); +} + +vbfloat16mf4x7_t test_vcreate_v_bf16mf4x7(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4, vbfloat16mf4_t v5, + vbfloat16mf4_t v6) { + return __riscv_vcreate_v_bf16mf4x7(v0, v1, v2, v3, v4, v5, v6); +} + +vbfloat16mf4x8_t test_vcreate_v_bf16mf4x8(vbfloat16mf4_t v0, vbfloat16mf4_t v1, + vbfloat16mf4_t v2, vbfloat16mf4_t v3, + vbfloat16mf4_t v4, vbfloat16mf4_t v5, + vbfloat16mf4_t v6, + vbfloat16mf4_t v7) { + return __riscv_vcreate_v_bf16mf4x8(v0, v1, v2, v3, v4, v5, v6, v7); +} + +vbfloat16mf2x2_t test_vcreate_v_bf16mf2x2(vbfloat16mf2_t v0, + vbfloat16mf2_t v1) { + return __riscv_vcreate_v_bf16mf2x2(v0, v1); +} + +vbfloat16mf2x3_t test_vcreate_v_bf16mf2x3(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2) { + return __riscv_vcreate_v_bf16mf2x3(v0, v1, v2); +} + +vbfloat16mf2x4_t test_vcreate_v_bf16mf2x4(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2, + vbfloat16mf2_t v3) { + return __riscv_vcreate_v_bf16mf2x4(v0, v1, v2, v3); +} + +vbfloat16mf2x5_t test_vcreate_v_bf16mf2x5(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2, vbfloat16mf2_t v3, + vbfloat16mf2_t v4) { + return __riscv_vcreate_v_bf16mf2x5(v0, v1, v2, v3, v4); +} + +vbfloat16mf2x6_t test_vcreate_v_bf16mf2x6(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2, vbfloat16mf2_t v3, + vbfloat16mf2_t v4, + vbfloat16mf2_t v5) { + return __riscv_vcreate_v_bf16mf2x6(v0, v1, v2, v3, v4, v5); +} + +vbfloat16mf2x7_t test_vcreate_v_bf16mf2x7(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2, vbfloat16mf2_t v3, + vbfloat16mf2_t v4, vbfloat16mf2_t v5, + vbfloat16mf2_t v6) { + return __riscv_vcreate_v_bf16mf2x7(v0, v1, v2, v3, v4, v5, v6); +} + +vbfloat16mf2x8_t test_vcreate_v_bf16mf2x8(vbfloat16mf2_t v0, vbfloat16mf2_t v1, + vbfloat16mf2_t v2, vbfloat16mf2_t v3, + vbfloat16mf2_t v4, vbfloat16mf2_t v5, + vbfloat16mf2_t v6, + vbfloat16mf2_t v7) { + return __riscv_vcreate_v_bf16mf2x8(v0, v1, v2, v3, v4, v5, v6, v7); +} + +vbfloat16m1x2_t test_vcreate_v_bf16m1x2(vbfloat16m1_t v0, vbfloat16m1_t v1) { + return __riscv_vcreate_v_bf16m1x2(v0, v1); +} + +vbfloat16m1x3_t test_vcreate_v_bf16m1x3(vbfloat16m1_t v0, vbfloat16m1_t v1, 
+                                        vbfloat16m1_t v2) {
+  return __riscv_vcreate_v_bf16m1x3(v0, v1, v2);
+}
+
+vbfloat16m1x4_t test_vcreate_v_bf16m1x4(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                        vbfloat16m1_t v2, vbfloat16m1_t v3) {
+  return __riscv_vcreate_v_bf16m1x4(v0, v1, v2, v3);
+}
+
+vbfloat16m1x5_t test_vcreate_v_bf16m1x5(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                        vbfloat16m1_t v2, vbfloat16m1_t v3,
+                                        vbfloat16m1_t v4) {
+  return __riscv_vcreate_v_bf16m1x5(v0, v1, v2, v3, v4);
+}
+
+vbfloat16m1x6_t test_vcreate_v_bf16m1x6(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                        vbfloat16m1_t v2, vbfloat16m1_t v3,
+                                        vbfloat16m1_t v4, vbfloat16m1_t v5) {
+  return __riscv_vcreate_v_bf16m1x6(v0, v1, v2, v3, v4, v5);
+}
+
+vbfloat16m1x7_t test_vcreate_v_bf16m1x7(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                        vbfloat16m1_t v2, vbfloat16m1_t v3,
+                                        vbfloat16m1_t v4, vbfloat16m1_t v5,
+                                        vbfloat16m1_t v6) {
+  return __riscv_vcreate_v_bf16m1x7(v0, v1, v2, v3, v4, v5, v6);
+}
+
+vbfloat16m1x8_t test_vcreate_v_bf16m1x8(vbfloat16m1_t v0, vbfloat16m1_t v1,
+                                        vbfloat16m1_t v2, vbfloat16m1_t v3,
+                                        vbfloat16m1_t v4, vbfloat16m1_t v5,
+                                        vbfloat16m1_t v6, vbfloat16m1_t v7) {
+  return __riscv_vcreate_v_bf16m1x8(v0, v1, v2, v3, v4, v5, v6, v7);
+}
+
+vbfloat16m2x2_t test_vcreate_v_bf16m2x2(vbfloat16m2_t v0, vbfloat16m2_t v1) {
+  return __riscv_vcreate_v_bf16m2x2(v0, v1);
+}
+
+vbfloat16m2x3_t test_vcreate_v_bf16m2x3(vbfloat16m2_t v0, vbfloat16m2_t v1,
+                                        vbfloat16m2_t v2) {
+  return __riscv_vcreate_v_bf16m2x3(v0, v1, v2);
+}
+
+vbfloat16m2x4_t test_vcreate_v_bf16m2x4(vbfloat16m2_t v0, vbfloat16m2_t v1,
+                                        vbfloat16m2_t v2, vbfloat16m2_t v3) {
+  return __riscv_vcreate_v_bf16m2x4(v0, v1, v2, v3);
+}
+
+vbfloat16m4x2_t test_vcreate_v_bf16m4x2(vbfloat16m4_t v0, vbfloat16m4_t v1) {
+  return __riscv_vcreate_v_bf16m4x2(v0, v1);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vfncvtbf16.c b/auto-generated/bfloat16/llvm-api-tests/vfncvtbf16.c
new file mode 100644
index 000000000..8aaabc00a
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vfncvtbf16.c
@@ -0,0 +1,98 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16mf4(vs2, vl);
+}
+
+vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16mf2(vs2, vl);
+}
+
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16m1(vs2, vl);
+}
+
+vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16m2(vs2, vl);
+}
+
+vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16m4(vs2, vl);
+}
+
+vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                               size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16mf4_m(vm, vs2, vl);
+}
+
+vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+                                               size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16mf2_m(vm, vs2, vl);
+}
+
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+                                             size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16m1_m(vm, vs2, vl);
+}
+
+vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+                                             size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16m2_m(vm, vs2, vl);
+}
+
+vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+                                             size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16m4_m(vm, vs2, vl);
+}
+
+vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16m1_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16m2_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16m4_rm(vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16mf4_t
+test_vfncvtbf16_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_m(vbool32_t vm,
+                                                  vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+                                                size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+                                                size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+                                                size_t vl) {
+  return __riscv_vfncvtbf16_f_f_w_bf16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vfwcvtbf16.c b/auto-generated/bfloat16/llvm-api-tests/vfwcvtbf16.c
new file mode 100644
index 000000000..eac92fe54
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vfwcvtbf16.c
@@ -0,0 +1,53 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvtbf16_f_f_v_f32mf2(vs2, vl);
+}
+
+vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1(vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvtbf16_f_f_v_f32m1(vs2, vl);
+}
+
+vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2(vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvtbf16_f_f_v_f32m2(vs2, vl);
+}
+
+vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4(vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvtbf16_f_f_v_f32m4(vs2, vl);
+}
+
+vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8(vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvtbf16_f_f_v_f32m8(vs2, vl);
+}
+
+vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+                                             size_t vl) {
+  return __riscv_vfwcvtbf16_f_f_v_f32mf2_m(vm, vs2, vl);
+}
+
+vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+                                           size_t vl) {
+  return __riscv_vfwcvtbf16_f_f_v_f32m1_m(vm, vs2, vl);
+}
+
+vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+                                           size_t vl) {
+  return __riscv_vfwcvtbf16_f_f_v_f32m2_m(vm, vs2, vl);
+}
+
+vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+                                           size_t vl) {
+  return __riscv_vfwcvtbf16_f_f_v_f32m4_m(vm, vs2, vl);
+}
+
+vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+                                           size_t vl) {
+  return __riscv_vfwcvtbf16_f_f_v_f32m8_m(vm, vs2, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vfwmaccbf16.c b/auto-generated/bfloat16/llvm-api-tests/vfwmaccbf16.c
new file mode 100644
index 000000000..d1a8f0987
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vfwmaccbf16.c
@@ -0,0 +1,239 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2(vfloat32mf2_t vd, vbfloat16mf4_t vs1,
+                                         vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vv_f32mf2(vd, vs1, vs2, vl);
+}
+
+vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+                                         vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vf_f32mf2(vd, vs1, vs2, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vv_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+                                       vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vv_f32m1(vd, vs1, vs2, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1(vfloat32m1_t vd, __bf16 vs1,
+                                       vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vf_f32m1(vd, vs1, vs2, vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vv_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+                                       vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vv_f32m2(vd, vs1, vs2, vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vf_f32m2(vfloat32m2_t vd, __bf16 vs1,
+                                       vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vf_f32m2(vd, vs1, vs2, vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vv_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+                                       vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vv_f32m4(vd, vs1, vs2, vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vf_f32m4(vfloat32m4_t vd, __bf16 vs1,
+                                       vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vf_f32m4(vd, vs1, vs2, vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vv_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+                                       vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vv_f32m8(vd, vs1, vs2, vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vf_f32m8(vfloat32m8_t vd, __bf16 vs1,
+                                       vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vf_f32m8(vd, vs1, vs2, vl);
+}
+
+vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+                                           vbfloat16mf4_t vs1,
+                                           vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vv_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+                                           __bf16 vs1, vbfloat16mf4_t vs2,
+                                           size_t vl) {
+  return __riscv_vfwmaccbf16_vf_f32mf2_m(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+                                         vbfloat16mf2_t vs1, vbfloat16mf2_t vs2,
+                                         size_t vl) {
+  return __riscv_vfwmaccbf16_vv_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+                                         __bf16 vs1, vbfloat16mf2_t vs2,
+                                         size_t vl) {
+  return __riscv_vfwmaccbf16_vf_f32m1_m(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+                                         vbfloat16m1_t vs1, vbfloat16m1_t vs2,
+                                         size_t vl) {
+  return __riscv_vfwmaccbf16_vv_f32m2_m(vm, vd, vs1, vs2, vl);
+} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_m(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_m(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_m(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_m(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_m(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); 
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+    __bf16 vs1, vbfloat16mf2_t vs2,
+    size_t vl) {
+  return __riscv_vfwmaccbf16_vf_f32m1_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+      vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+    vbfloat16m1_t vs1,
+    vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vv_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+      vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+    __bf16 vs1, vbfloat16m1_t vs2,
+    size_t vl) {
+  return __riscv_vfwmaccbf16_vf_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+      vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+    vbfloat16m2_t vs1,
+    vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vv_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+      vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+    __bf16 vs1, vbfloat16m2_t vs2,
+    size_t vl) {
+  return __riscv_vfwmaccbf16_vf_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+      vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+    vbfloat16m4_t vs1,
+    vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_vv_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+      vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+    __bf16 vs1, vbfloat16m4_t vs2,
+    size_t vl) {
+  return __riscv_vfwmaccbf16_vf_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+      vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vget.c b/auto-generated/bfloat16/llvm-api-tests/vget.c
new file mode 100644
index 000000000..ec87eaaa9
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vget.c
@@ -0,0 +1,146 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16m1_t test_vget_v_bf16m2_bf16m1(vbfloat16m2_t src, size_t index) {
+  return __riscv_vget_v_bf16m2_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m4_bf16m1(vbfloat16m4_t src, size_t index) {
+  return __riscv_vget_v_bf16m4_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m8_bf16m1(vbfloat16m8_t src, size_t index) {
+  return __riscv_vget_v_bf16m8_bf16m1(src, 0);
+}
+
+vbfloat16m2_t test_vget_v_bf16m4_bf16m2(vbfloat16m4_t src, size_t index) {
+  return __riscv_vget_v_bf16m4_bf16m2(src, 0);
+}
+
+vbfloat16m2_t test_vget_v_bf16m8_bf16m2(vbfloat16m8_t src, size_t index) {
+  return __riscv_vget_v_bf16m8_bf16m2(src, 0);
+}
+
+vbfloat16m4_t test_vget_v_bf16m8_bf16m4(vbfloat16m8_t src, size_t index) {
+  return __riscv_vget_v_bf16m8_bf16m4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x2_bf16mf4(vbfloat16mf4x2_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf4x2_bf16mf4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x3_bf16mf4(vbfloat16mf4x3_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf4x3_bf16mf4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x4_bf16mf4(vbfloat16mf4x4_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf4x4_bf16mf4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x5_bf16mf4(vbfloat16mf4x5_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf4x5_bf16mf4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x6_bf16mf4(vbfloat16mf4x6_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf4x6_bf16mf4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x7_bf16mf4(vbfloat16mf4x7_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf4x7_bf16mf4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x8_bf16mf4(vbfloat16mf4x8_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf4x8_bf16mf4(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x2_bf16mf2(vbfloat16mf2x2_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf2x2_bf16mf2(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x3_bf16mf2(vbfloat16mf2x3_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf2x3_bf16mf2(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x4_bf16mf2(vbfloat16mf2x4_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf2x4_bf16mf2(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x5_bf16mf2(vbfloat16mf2x5_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf2x5_bf16mf2(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x6_bf16mf2(vbfloat16mf2x6_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf2x6_bf16mf2(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x7_bf16mf2(vbfloat16mf2x7_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf2x7_bf16mf2(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x8_bf16mf2(vbfloat16mf2x8_t src,
+    size_t index) {
+  return __riscv_vget_v_bf16mf2x8_bf16mf2(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x2_bf16m1(vbfloat16m1x2_t src, size_t index) {
+  return __riscv_vget_v_bf16m1x2_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x3_bf16m1(vbfloat16m1x3_t src, size_t index) {
+  return __riscv_vget_v_bf16m1x3_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x4_bf16m1(vbfloat16m1x4_t src, size_t index) {
+  return __riscv_vget_v_bf16m1x4_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x5_bf16m1(vbfloat16m1x5_t src, size_t index) {
+  return __riscv_vget_v_bf16m1x5_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x6_bf16m1(vbfloat16m1x6_t src, size_t index) {
+  return __riscv_vget_v_bf16m1x6_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x7_bf16m1(vbfloat16m1x7_t src, size_t index) {
+  return __riscv_vget_v_bf16m1x7_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x8_bf16m1(vbfloat16m1x8_t src, size_t index) {
+  return __riscv_vget_v_bf16m1x8_bf16m1(src, 0);
+}
+
+vbfloat16m2_t test_vget_v_bf16m2x2_bf16m2(vbfloat16m2x2_t src, size_t index) {
+  return __riscv_vget_v_bf16m2x2_bf16m2(src, 0);
+}
+
+vbfloat16m2_t test_vget_v_bf16m2x3_bf16m2(vbfloat16m2x3_t src, size_t index) {
+  return __riscv_vget_v_bf16m2x3_bf16m2(src, 0);
+}
+
+vbfloat16m2_t test_vget_v_bf16m2x4_bf16m2(vbfloat16m2x4_t src, size_t index) {
+  return __riscv_vget_v_bf16m2x4_bf16m2(src, 0);
+}
+
+vbfloat16m4_t test_vget_v_bf16m4x2_bf16m4(vbfloat16m4x2_t src, size_t index) {
+  return __riscv_vget_v_bf16m4x2_bf16m4(src, 0);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vle16.c b/auto-generated/bfloat16/llvm-api-tests/vle16.c
new file mode 100644
index 000000000..eccbebef2
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vle16.c
@@ -0,0 +1,59 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vle16_v_bf16mf4(const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_v_bf16mf4(rs1, vl);
+}
+
+vbfloat16mf2_t test_vle16_v_bf16mf2(const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_v_bf16mf2(rs1, vl);
+}
+
+vbfloat16m1_t test_vle16_v_bf16m1(const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_v_bf16m1(rs1, vl);
+}
+
+vbfloat16m2_t test_vle16_v_bf16m2(const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_v_bf16m2(rs1, vl);
+}
+
+vbfloat16m4_t test_vle16_v_bf16m4(const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_v_bf16m4(rs1, vl);
+}
+
+vbfloat16m8_t test_vle16_v_bf16m8(const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_v_bf16m8(rs1, vl);
+}
+
+vbfloat16mf4_t test_vle16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vle16_v_bf16mf4_m(vm, rs1, vl);
+}
+
+vbfloat16mf2_t test_vle16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vle16_v_bf16mf2_m(vm, rs1, vl);
+}
+
+vbfloat16m1_t test_vle16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vle16_v_bf16m1_m(vm, rs1, vl);
+}
+
+vbfloat16m2_t test_vle16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_v_bf16m2_m(vm, rs1, vl);
+}
+
+vbfloat16m4_t test_vle16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_v_bf16m4_m(vm, rs1, vl);
+}
+
+vbfloat16m8_t test_vle16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_v_bf16m8_m(vm, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vle16ff.c b/auto-generated/bfloat16/llvm-api-tests/vle16ff.c
new file mode 100644
index 000000000..d65364df8
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vle16ff.c
@@ -0,0 +1,68 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vle16ff_v_bf16mf4(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vle16ff_v_bf16mf4(rs1, new_vl, vl);
+}
+
+vbfloat16mf2_t test_vle16ff_v_bf16mf2(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vle16ff_v_bf16mf2(rs1, new_vl, vl);
+}
+
+vbfloat16m1_t test_vle16ff_v_bf16m1(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vle16ff_v_bf16m1(rs1, new_vl, vl);
+}
+
+vbfloat16m2_t test_vle16ff_v_bf16m2(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vle16ff_v_bf16m2(rs1, new_vl, vl);
+}
+
+vbfloat16m4_t test_vle16ff_v_bf16m4(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vle16ff_v_bf16m4(rs1, new_vl, vl);
+}
+
+vbfloat16m8_t test_vle16ff_v_bf16m8(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vle16ff_v_bf16m8(rs1, new_vl, vl);
+}
+
+vbfloat16mf4_t test_vle16ff_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_bf16mf4_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16mf2_t test_vle16ff_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_bf16mf2_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m1_t test_vle16ff_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_bf16m1_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m2_t test_vle16ff_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_bf16m2_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m4_t test_vle16ff_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_bf16m4_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m8_t test_vle16ff_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_v_bf16m8_m(vm, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlmul_ext_v.c b/auto-generated/bfloat16/llvm-api-tests/vlmul_ext_v.c
new file mode 100644
index 000000000..dfefe862d
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlmul_ext_v.c
@@ -0,0 +1,68 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf2_t test_vlmul_ext_v_bf16mf4_bf16mf2(vbfloat16mf4_t value) {
+  return __riscv_vlmul_ext_v_bf16mf4_bf16mf2(value);
+}
+
+vbfloat16m1_t test_vlmul_ext_v_bf16mf4_bf16m1(vbfloat16mf4_t value) {
+  return __riscv_vlmul_ext_v_bf16mf4_bf16m1(value);
+}
+
+vbfloat16m2_t test_vlmul_ext_v_bf16mf4_bf16m2(vbfloat16mf4_t value) {
+  return __riscv_vlmul_ext_v_bf16mf4_bf16m2(value);
+}
+
+vbfloat16m4_t test_vlmul_ext_v_bf16mf4_bf16m4(vbfloat16mf4_t value) {
+  return __riscv_vlmul_ext_v_bf16mf4_bf16m4(value);
+}
+
+vbfloat16m8_t test_vlmul_ext_v_bf16mf4_bf16m8(vbfloat16mf4_t value) {
+  return __riscv_vlmul_ext_v_bf16mf4_bf16m8(value);
+}
+
+vbfloat16m1_t test_vlmul_ext_v_bf16mf2_bf16m1(vbfloat16mf2_t value) {
+  return __riscv_vlmul_ext_v_bf16mf2_bf16m1(value);
+}
+
+vbfloat16m2_t test_vlmul_ext_v_bf16mf2_bf16m2(vbfloat16mf2_t value) {
+  return __riscv_vlmul_ext_v_bf16mf2_bf16m2(value);
+}
+
+vbfloat16m4_t test_vlmul_ext_v_bf16mf2_bf16m4(vbfloat16mf2_t value) {
+  return __riscv_vlmul_ext_v_bf16mf2_bf16m4(value);
+}
+
+vbfloat16m8_t test_vlmul_ext_v_bf16mf2_bf16m8(vbfloat16mf2_t value) {
+  return __riscv_vlmul_ext_v_bf16mf2_bf16m8(value);
+}
+
+vbfloat16m2_t test_vlmul_ext_v_bf16m1_bf16m2(vbfloat16m1_t value) {
+  return __riscv_vlmul_ext_v_bf16m1_bf16m2(value);
+}
+
+vbfloat16m4_t test_vlmul_ext_v_bf16m1_bf16m4(vbfloat16m1_t value) {
+  return __riscv_vlmul_ext_v_bf16m1_bf16m4(value);
+}
+
+vbfloat16m8_t test_vlmul_ext_v_bf16m1_bf16m8(vbfloat16m1_t value) {
+  return __riscv_vlmul_ext_v_bf16m1_bf16m8(value);
+}
+
+vbfloat16m4_t test_vlmul_ext_v_bf16m2_bf16m4(vbfloat16m2_t value) {
+  return __riscv_vlmul_ext_v_bf16m2_bf16m4(value);
+}
+
+vbfloat16m8_t test_vlmul_ext_v_bf16m2_bf16m8(vbfloat16m2_t value) {
+  return __riscv_vlmul_ext_v_bf16m2_bf16m8(value);
+}
+
+vbfloat16m8_t test_vlmul_ext_v_bf16m4_bf16m8(vbfloat16m4_t value) {
+  return __riscv_vlmul_ext_v_bf16m4_bf16m8(value);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlmul_trunc_v.c b/auto-generated/bfloat16/llvm-api-tests/vlmul_trunc_v.c
new file mode 100644
index 000000000..3442c3407
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlmul_trunc_v.c
@@ -0,0 +1,68 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vlmul_trunc_v_bf16mf2_bf16mf4(vbfloat16mf2_t value) {
+  return __riscv_vlmul_trunc_v_bf16mf2_bf16mf4(value);
+}
+
+vbfloat16mf4_t test_vlmul_trunc_v_bf16m1_bf16mf4(vbfloat16m1_t value) {
+  return __riscv_vlmul_trunc_v_bf16m1_bf16mf4(value);
+}
+
+vbfloat16mf2_t test_vlmul_trunc_v_bf16m1_bf16mf2(vbfloat16m1_t value) {
+  return __riscv_vlmul_trunc_v_bf16m1_bf16mf2(value);
+}
+
+vbfloat16mf4_t test_vlmul_trunc_v_bf16m2_bf16mf4(vbfloat16m2_t value) {
+  return __riscv_vlmul_trunc_v_bf16m2_bf16mf4(value);
+}
+
+vbfloat16mf2_t test_vlmul_trunc_v_bf16m2_bf16mf2(vbfloat16m2_t value) {
+  return __riscv_vlmul_trunc_v_bf16m2_bf16mf2(value);
+}
+
+vbfloat16m1_t test_vlmul_trunc_v_bf16m2_bf16m1(vbfloat16m2_t value) {
+  return __riscv_vlmul_trunc_v_bf16m2_bf16m1(value);
+}
+
+vbfloat16mf4_t test_vlmul_trunc_v_bf16m4_bf16mf4(vbfloat16m4_t value) {
+  return __riscv_vlmul_trunc_v_bf16m4_bf16mf4(value);
+}
+
+vbfloat16mf2_t test_vlmul_trunc_v_bf16m4_bf16mf2(vbfloat16m4_t value) {
+  return __riscv_vlmul_trunc_v_bf16m4_bf16mf2(value);
+}
+
+vbfloat16m1_t test_vlmul_trunc_v_bf16m4_bf16m1(vbfloat16m4_t value) {
+  return __riscv_vlmul_trunc_v_bf16m4_bf16m1(value);
+}
+
+vbfloat16m2_t test_vlmul_trunc_v_bf16m4_bf16m2(vbfloat16m4_t value) {
+  return __riscv_vlmul_trunc_v_bf16m4_bf16m2(value);
+}
+
+vbfloat16mf4_t test_vlmul_trunc_v_bf16m8_bf16mf4(vbfloat16m8_t value) {
+  return __riscv_vlmul_trunc_v_bf16m8_bf16mf4(value);
+}
+
+vbfloat16mf2_t test_vlmul_trunc_v_bf16m8_bf16mf2(vbfloat16m8_t value) {
+  return __riscv_vlmul_trunc_v_bf16m8_bf16mf2(value);
+}
+
+vbfloat16m1_t test_vlmul_trunc_v_bf16m8_bf16m1(vbfloat16m8_t value) {
+  return __riscv_vlmul_trunc_v_bf16m8_bf16m1(value);
+}
+
+vbfloat16m2_t test_vlmul_trunc_v_bf16m8_bf16m2(vbfloat16m8_t value) {
+  return __riscv_vlmul_trunc_v_bf16m8_bf16m2(value);
+}
+
+vbfloat16m4_t test_vlmul_trunc_v_bf16m8_bf16m4(vbfloat16m8_t value) {
+  return __riscv_vlmul_trunc_v_bf16m8_bf16m4(value);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vloxei16.c b/auto-generated/bfloat16/llvm-api-tests/vloxei16.c
new file mode 100644
index 000000000..ee46c9333
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vloxei16.c
@@ -0,0 +1,68 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vloxei16_v_bf16mf4(const __bf16 *rs1, vuint16mf4_t rs2,
+    size_t vl) {
+  return __riscv_vloxei16_v_bf16mf4(rs1, rs2, vl);
+}
+
+vbfloat16mf2_t test_vloxei16_v_bf16mf2(const __bf16 *rs1, vuint16mf2_t rs2,
+    size_t vl) {
+  return __riscv_vloxei16_v_bf16mf2(rs1, rs2, vl);
+}
+
+vbfloat16m1_t test_vloxei16_v_bf16m1(const __bf16 *rs1, vuint16m1_t rs2,
+    size_t vl) {
+  return __riscv_vloxei16_v_bf16m1(rs1, rs2, vl);
+}
+
+vbfloat16m2_t test_vloxei16_v_bf16m2(const __bf16 *rs1, vuint16m2_t rs2,
+    size_t vl) {
+  return __riscv_vloxei16_v_bf16m2(rs1, rs2, vl);
+}
+
+vbfloat16m4_t test_vloxei16_v_bf16m4(const __bf16 *rs1, vuint16m4_t rs2,
+    size_t vl) {
+  return __riscv_vloxei16_v_bf16m4(rs1, rs2, vl);
+}
+
+vbfloat16m8_t test_vloxei16_v_bf16m8(const __bf16 *rs1, vuint16m8_t rs2,
+    size_t vl) {
+  return __riscv_vloxei16_v_bf16m8(rs1, rs2, vl);
+}
+
+vbfloat16mf4_t test_vloxei16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_bf16mf4_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16mf2_t test_vloxei16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_bf16mf2_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m1_t test_vloxei16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_bf16m1_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m2_t test_vloxei16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+    vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_bf16m2_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m4_t test_vloxei16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1,
+    vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_bf16m4_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m8_t test_vloxei16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1,
+    vuint16m8_t rs2, size_t vl) {
+  return __riscv_vloxei16_v_bf16m8_m(vm, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vloxseg2ei16.c b/auto-generated/bfloat16/llvm-api-tests/vloxseg2ei16.c
new file mode 100644
index 000000000..6711e88a3
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vloxseg2ei16.c
@@ -0,0 +1,60 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_bf16mf4x2(rs1, rs2, vl);
+}
+
+vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_bf16mf2x2(rs1, rs2, vl);
+}
+
+vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2,
+    size_t vl) {
+  return __riscv_vloxseg2ei16_v_bf16m1x2(rs1, rs2, vl);
+}
+
+vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2,
+    size_t vl) {
+  return __riscv_vloxseg2ei16_v_bf16m2x2(rs1, rs2, vl);
+}
+
+vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2,
+    size_t vl) {
+  return __riscv_vloxseg2ei16_v_bf16m4x2(rs1, rs2, vl);
+}
+
+vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_bf16mf4x2_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_bf16mf2x2_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_bf16m1x2_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+    vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_bf16m2x2_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
+    vuint16m4_t rs2, size_t vl) {
+  return __riscv_vloxseg2ei16_v_bf16m4x2_m(vm, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vloxseg3ei16.c b/auto-generated/bfloat16/llvm-api-tests/vloxseg3ei16.c
new file mode 100644
index 000000000..befe30698
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vloxseg3ei16.c
@@ -0,0 +1,50 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_bf16mf4x3(rs1, rs2, vl);
+}
+
+vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_bf16mf2x3(rs1, rs2, vl);
+}
+
+vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2,
+    size_t vl) {
+  return __riscv_vloxseg3ei16_v_bf16m1x3(rs1, rs2, vl);
+}
+
+vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2,
+    size_t vl) {
+  return __riscv_vloxseg3ei16_v_bf16m2x3(rs1, rs2, vl);
+}
+
+vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_bf16mf4x3_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_bf16mf2x3_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_bf16m1x3_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+    vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg3ei16_v_bf16m2x3_m(vm, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vloxseg4ei16.c b/auto-generated/bfloat16/llvm-api-tests/vloxseg4ei16.c
new file mode 100644
index 000000000..4df618f2c
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vloxseg4ei16.c
@@ -0,0 +1,50 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_bf16mf4x4(rs1, rs2, vl);
+}
+
+vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_bf16mf2x4(rs1, rs2, vl);
+}
+
+vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2,
+    size_t vl) {
+  return __riscv_vloxseg4ei16_v_bf16m1x4(rs1, rs2, vl);
+}
+
+vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2,
+    size_t vl) {
+  return __riscv_vloxseg4ei16_v_bf16m2x4(rs1, rs2, vl);
+}
+
+vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_bf16mf4x4_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_bf16mf2x4_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_bf16m1x4_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+    vuint16m2_t rs2, size_t vl) {
+  return __riscv_vloxseg4ei16_v_bf16m2x4_m(vm, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vloxseg5ei16.c b/auto-generated/bfloat16/llvm-api-tests/vloxseg5ei16.c
new file mode 100644
index 000000000..eab69bdd3
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vloxseg5ei16.c
@@ -0,0 +1,40 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_bf16mf4x5(rs1, rs2, vl);
+}
+
+vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_bf16mf2x5(rs1, rs2, vl);
+}
+
+vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2,
+    size_t vl) {
+  return __riscv_vloxseg5ei16_v_bf16m1x5(rs1, rs2, vl);
+}
+
+vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_bf16mf4x5_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_bf16mf2x5_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg5ei16_v_bf16m1x5_m(vm, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vloxseg6ei16.c b/auto-generated/bfloat16/llvm-api-tests/vloxseg6ei16.c
new file mode 100644
index 000000000..599be467f
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vloxseg6ei16.c
@@ -0,0 +1,40 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_bf16mf4x6(rs1, rs2, vl);
+}
+
+vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_bf16mf2x6(rs1, rs2, vl);
+}
+
+vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2,
+    size_t vl) {
+  return __riscv_vloxseg6ei16_v_bf16m1x6(rs1, rs2, vl);
+}
+
+vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_bf16mf4x6_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_bf16mf2x6_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg6ei16_v_bf16m1x6_m(vm, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vloxseg7ei16.c b/auto-generated/bfloat16/llvm-api-tests/vloxseg7ei16.c
new file mode 100644
index 000000000..022e7c445
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vloxseg7ei16.c
@@ -0,0 +1,40 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_bf16mf4x7(rs1, rs2, vl);
+}
+
+vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_bf16mf2x7(rs1, rs2, vl);
+}
+
+vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2,
+    size_t vl) {
+  return __riscv_vloxseg7ei16_v_bf16m1x7(rs1, rs2, vl);
+}
+
+vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_bf16mf4x7_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_bf16mf2x7_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg7ei16_v_bf16m1x7_m(vm, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vloxseg8ei16.c b/auto-generated/bfloat16/llvm-api-tests/vloxseg8ei16.c
new file mode 100644
index 000000000..081e6baea
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vloxseg8ei16.c
@@ -0,0 +1,40 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei16_v_bf16mf4x8(rs1, rs2, vl);
+}
+
+vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei16_v_bf16mf2x8(rs1, rs2, vl);
+}
+
+vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2,
+    size_t vl) {
+  return __riscv_vloxseg8ei16_v_bf16m1x8(rs1, rs2, vl);
+}
+
+vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei16_v_bf16mf4x8_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei16_v_bf16mf2x8_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vloxseg8ei16_v_bf16m1x8_m(vm, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlse16.c b/auto-generated/bfloat16/llvm-api-tests/vlse16.c
new file mode 100644
index 000000000..8d46a5b17
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlse16.c
@@ -0,0 +1,68 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vlse16_v_bf16mf4(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlse16_v_bf16mf4(rs1, rs2, vl);
+}
+
+vbfloat16mf2_t test_vlse16_v_bf16mf2(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlse16_v_bf16mf2(rs1, rs2, vl);
+}
+
+vbfloat16m1_t test_vlse16_v_bf16m1(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlse16_v_bf16m1(rs1, rs2, vl);
+}
+
+vbfloat16m2_t test_vlse16_v_bf16m2(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlse16_v_bf16m2(rs1, rs2, vl);
+}
+
+vbfloat16m4_t test_vlse16_v_bf16m4(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlse16_v_bf16m4(rs1, rs2, vl);
+}
+
+vbfloat16m8_t test_vlse16_v_bf16m8(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlse16_v_bf16m8(rs1, rs2, vl);
+}
+
+vbfloat16mf4_t test_vlse16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_bf16mf4_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16mf2_t test_vlse16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_bf16mf2_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m1_t test_vlse16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_bf16m1_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m2_t test_vlse16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_bf16m2_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m4_t test_vlse16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_bf16m4_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m8_t test_vlse16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlse16_v_bf16m8_m(vm, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg2e16.c b/auto-generated/bfloat16/llvm-api-tests/vlseg2e16.c
new file mode 100644
index 000000000..13d67c05d
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg2e16.c
@@ -0,0 +1,53 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_bf16mf4x2(rs1, vl);
+}
+
+vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_bf16mf2x2(rs1, vl);
+}
+
+vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_bf16m1x2(rs1, vl);
+}
+
+vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_bf16m2x2(rs1, vl);
+}
+
+vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg2e16_v_bf16m4x2(rs1, vl);
+}
+
+vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg2e16_v_bf16mf4x2_m(vm, rs1, vl);
+}
+
+vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg2e16_v_bf16mf2x2_m(vm, rs1, vl);
+}
+
+vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg2e16_v_bf16m1x2_m(vm, rs1, vl);
+}
+
+vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg2e16_v_bf16m2x2_m(vm, rs1, vl);
+}
+
+vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg2e16_v_bf16m4x2_m(vm, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg2e16ff.c b/auto-generated/bfloat16/llvm-api-tests/vlseg2e16ff.c
new file mode 100644
index 000000000..1c1fda35f
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg2e16ff.c
@@ -0,0 +1,58 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg2e16ff_v_bf16mf4x2(rs1, new_vl, vl);
+}
+
+vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg2e16ff_v_bf16mf2x2(rs1, new_vl, vl);
+}
+
+vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg2e16ff_v_bf16m1x2(rs1, new_vl, vl);
+}
+
+vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg2e16ff_v_bf16m2x2(rs1, new_vl, vl);
+}
+
+vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg2e16ff_v_bf16m4x2(rs1, new_vl, vl);
+}
+
+vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_v_bf16mf4x2_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_v_bf16mf2x2_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_v_bf16m1x2_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_v_bf16m2x2_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg2e16ff_v_bf16m4x2_m(vm, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg3e16.c b/auto-generated/bfloat16/llvm-api-tests/vlseg3e16.c
new file mode 100644
index 000000000..a5c119c76
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg3e16.c
@@ -0,0 +1,44 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg3e16_v_bf16mf4x3(rs1, vl);
+}
+
+vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg3e16_v_bf16mf2x3(rs1, vl);
+}
+
+vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg3e16_v_bf16m1x3(rs1, vl);
+}
+
+vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg3e16_v_bf16m2x3(rs1, vl);
+}
+
+vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg3e16_v_bf16mf4x3_m(vm, rs1, vl);
+}
+
+vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg3e16_v_bf16mf2x3_m(vm, rs1, vl);
+}
+
+vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg3e16_v_bf16m1x3_m(vm, rs1, vl);
+}
+
+vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg3e16_v_bf16m2x3_m(vm, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg3e16ff.c b/auto-generated/bfloat16/llvm-api-tests/vlseg3e16ff.c
new file mode 100644
index 000000000..40fc816b4
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg3e16ff.c
@@ -0,0 +1,48 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg3e16ff_v_bf16mf4x3(rs1, new_vl, vl);
+}
+
+vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg3e16ff_v_bf16mf2x3(rs1, new_vl, vl);
+}
+
+vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg3e16ff_v_bf16m1x3(rs1, new_vl, vl);
+}
+
+vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg3e16ff_v_bf16m2x3(rs1, new_vl, vl);
+}
+
+vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_v_bf16mf4x3_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_v_bf16mf2x3_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_v_bf16m1x3_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg3e16ff_v_bf16m2x3_m(vm, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg4e16.c b/auto-generated/bfloat16/llvm-api-tests/vlseg4e16.c
new file mode 100644
index 000000000..cd9ec8571
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg4e16.c
@@ -0,0 +1,44 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg4e16_v_bf16mf4x4(rs1, vl);
+}
+
+vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg4e16_v_bf16mf2x4(rs1, vl);
+}
+
+vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg4e16_v_bf16m1x4(rs1, vl);
+}
+
+vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg4e16_v_bf16m2x4(rs1, vl);
+}
+
+vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg4e16_v_bf16mf4x4_m(vm, rs1, vl);
+}
+
+vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg4e16_v_bf16mf2x4_m(vm, rs1, vl);
+}
+
+vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg4e16_v_bf16m1x4_m(vm, rs1, vl);
+}
+
+vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg4e16_v_bf16m2x4_m(vm, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg4e16ff.c b/auto-generated/bfloat16/llvm-api-tests/vlseg4e16ff.c
new file mode 100644
index 000000000..42c289730
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg4e16ff.c
@@ -0,0 +1,48 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg4e16ff_v_bf16mf4x4(rs1, new_vl, vl);
+}
+
+vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg4e16ff_v_bf16mf2x4(rs1, new_vl, vl);
+}
+
+vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg4e16ff_v_bf16m1x4(rs1, new_vl, vl);
+}
+
+vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg4e16ff_v_bf16m2x4(rs1, new_vl, vl);
+}
+
+vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff_v_bf16mf4x4_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff_v_bf16mf2x4_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff_v_bf16m1x4_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg4e16ff_v_bf16m2x4_m(vm, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg5e16.c b/auto-generated/bfloat16/llvm-api-tests/vlseg5e16.c
new file mode 100644
index 000000000..4bb145cff
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg5e16.c
@@ -0,0 +1,35 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg5e16_v_bf16mf4x5(rs1, vl);
+}
+
+vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg5e16_v_bf16mf2x5(rs1, vl);
+}
+
+vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg5e16_v_bf16m1x5(rs1, vl);
+}
+
+vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg5e16_v_bf16mf4x5_m(vm, rs1, vl);
+}
+
+vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg5e16_v_bf16mf2x5_m(vm, rs1, vl);
+}
+
+vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg5e16_v_bf16m1x5_m(vm, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg5e16ff.c b/auto-generated/bfloat16/llvm-api-tests/vlseg5e16ff.c
new file mode 100644
index 000000000..a7e0b5bb9
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg5e16ff.c
@@ -0,0 +1,38 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg5e16ff_v_bf16mf4x5(rs1, new_vl, vl);
+}
+
+vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg5e16ff_v_bf16mf2x5(rs1, new_vl, vl);
+}
+
+vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg5e16ff_v_bf16m1x5(rs1, new_vl, vl);
+}
+
+vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff_v_bf16mf4x5_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff_v_bf16mf2x5_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg5e16ff_v_bf16m1x5_m(vm, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg6e16.c b/auto-generated/bfloat16/llvm-api-tests/vlseg6e16.c
new file mode 100644
index 000000000..fda3bb83e
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg6e16.c
@@ -0,0 +1,35 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg6e16_v_bf16mf4x6(rs1, vl);
+}
+
+vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg6e16_v_bf16mf2x6(rs1, vl);
+}
+
+vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg6e16_v_bf16m1x6(rs1, vl);
+}
+
+vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg6e16_v_bf16mf4x6_m(vm, rs1, vl);
+}
+
+vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg6e16_v_bf16mf2x6_m(vm, rs1, vl);
+}
+
+vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg6e16_v_bf16m1x6_m(vm, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg6e16ff.c b/auto-generated/bfloat16/llvm-api-tests/vlseg6e16ff.c
new file mode 100644
index 000000000..39b0d80e4
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg6e16ff.c
@@ -0,0 +1,38 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg6e16ff_v_bf16mf4x6(rs1, new_vl, vl);
+}
+
+vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg6e16ff_v_bf16mf2x6(rs1, new_vl, vl);
+}
+
+vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg6e16ff_v_bf16m1x6(rs1, new_vl, vl);
+}
+
+vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_v_bf16mf4x6_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_v_bf16mf2x6_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg6e16ff_v_bf16m1x6_m(vm, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg7e16.c b/auto-generated/bfloat16/llvm-api-tests/vlseg7e16.c
new file mode 100644
index 000000000..c19b11c0d
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg7e16.c
@@ -0,0 +1,35 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg7e16_v_bf16mf4x7(rs1, vl);
+}
+
+vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg7e16_v_bf16mf2x7(rs1, vl);
+}
+
+vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg7e16_v_bf16m1x7(rs1, vl);
+}
+
+vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg7e16_v_bf16mf4x7_m(vm, rs1, vl);
+}
+
+vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg7e16_v_bf16mf2x7_m(vm, rs1, vl);
+}
+
+vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg7e16_v_bf16m1x7_m(vm, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg7e16ff.c b/auto-generated/bfloat16/llvm-api-tests/vlseg7e16ff.c
new file mode 100644
index 000000000..e49d868e5
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg7e16ff.c
@@ -0,0 +1,38 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg7e16ff_v_bf16mf4x7(rs1, new_vl, vl);
+}
+
+vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg7e16ff_v_bf16mf2x7(rs1, new_vl, vl);
+}
+
+vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg7e16ff_v_bf16m1x7(rs1, new_vl, vl);
+}
+
+vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e16ff_v_bf16mf4x7_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e16ff_v_bf16mf2x7_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg7e16ff_v_bf16m1x7_m(vm, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg8e16.c b/auto-generated/bfloat16/llvm-api-tests/vlseg8e16.c
new file mode 100644
index 000000000..64efba08e
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg8e16.c
@@ -0,0 +1,35 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg8e16_v_bf16mf4x8(rs1, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg8e16_v_bf16mf2x8(rs1, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8(const __bf16 *rs1, size_t vl) {
+  return __riscv_vlseg8e16_v_bf16m1x8(rs1, vl);
+}
+
+vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg8e16_v_bf16mf4x8_m(vm, rs1, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg8e16_v_bf16mf2x8_m(vm, rs1, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+    size_t vl) {
+  return __riscv_vlseg8e16_v_bf16m1x8_m(vm, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlseg8e16ff.c b/auto-generated/bfloat16/llvm-api-tests/vlseg8e16ff.c
new file mode 100644
index 000000000..94e2e8ab8
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlseg8e16ff.c
@@ -0,0 +1,38 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg8e16ff_v_bf16mf4x8(rs1, new_vl, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg8e16ff_v_bf16mf2x8(rs1, new_vl, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8(const __bf16 *rs1, size_t *new_vl,
+    size_t vl) {
+  return __riscv_vlseg8e16ff_v_bf16m1x8(rs1, new_vl, vl);
+}
+
+vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_v_bf16mf4x8_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_v_bf16mf2x8_m(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_v_bf16m1x8_m(vm, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlsseg2e16.c b/auto-generated/bfloat16/llvm-api-tests/vlsseg2e16.c
new file mode 100644
index 000000000..18feb74f4
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlsseg2e16.c
@@ -0,0 +1,58 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16mf4x2(rs1, rs2, vl);
+}
+
+vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16mf2x2(rs1, rs2, vl);
+}
+
+vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16m1x2(rs1, rs2, vl);
+}
+
+vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16m2x2(rs1, rs2, vl);
+}
+
+vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16m4x2(rs1, rs2, vl);
+}
+
+vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16mf4x2_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16mf2x2_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16m1x2_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16m2x2_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16m4x2_m(vm, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vlsseg3e16.c b/auto-generated/bfloat16/llvm-api-tests/vlsseg3e16.c
new file mode 100644
index 000000000..7f72369a7
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vlsseg3e16.c
@@ -0,0 +1,48 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg3e16_v_bf16mf4x3(rs1, rs2, vl);
+}
+
+vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg3e16_v_bf16mf2x3(rs1, rs2, vl);
+}
+
+vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg3e16_v_bf16m1x3(rs1, rs2, vl);
+}
+
+vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3(const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg3e16_v_bf16m2x3(rs1, rs2, vl);
+}
+
+vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_bf16mf4x3_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg3e16_v_bf16mf2x3_m(vm, rs1, rs2, vl);
+}
+
+vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16m1x3_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16m2x3_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vlsseg4e16.c b/auto-generated/bfloat16/llvm-api-tests/vlsseg4e16.c new file mode 100644 index 000000000..39843fda2 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vlsseg4e16.c @@ -0,0 +1,48 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf4x4(rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf2x4(rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m1x4(rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m2x4(rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf4x4_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf2x4_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16m1x4_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16m2x4_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vlsseg5e16.c b/auto-generated/bfloat16/llvm-api-tests/vlsseg5e16.c new file mode 100644 index 000000000..08455a058 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vlsseg5e16.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf4x5(rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf2x5(rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_v_bf16m1x5(rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf4x5_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return 
__riscv_vlsseg5e16_v_bf16mf2x5_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16m1x5_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vlsseg6e16.c b/auto-generated/bfloat16/llvm-api-tests/vlsseg6e16.c new file mode 100644 index 000000000..6ac7d3d7c --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vlsseg6e16.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf4x6(rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf2x6(rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_v_bf16m1x6(rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf4x6_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf2x6_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16m1x6_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vlsseg7e16.c b/auto-generated/bfloat16/llvm-api-tests/vlsseg7e16.c new file mode 100644 index 000000000..cb76097ae --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vlsseg7e16.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf4x7(rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf2x7(rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_v_bf16m1x7(rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf4x7_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf2x7_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16m1x7_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vlsseg8e16.c b/auto-generated/bfloat16/llvm-api-tests/vlsseg8e16.c new file mode 100644 index 000000000..b74c31575 --- /dev/null +++ 
b/auto-generated/bfloat16/llvm-api-tests/vlsseg8e16.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf4x8(rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf2x8(rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8(const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_v_bf16m1x8(rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf4x8_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf2x8_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16m1x8_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vluxei16.c b/auto-generated/bfloat16/llvm-api-tests/vluxei16.c new file mode 100644 index 000000000..453ed6312 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vluxei16.c @@ -0,0 +1,68 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4_t test_vluxei16_v_bf16mf4(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf4(rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf2(rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m1(rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m2(rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m4(rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8(const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m8(rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16mf4_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16mf2_m(vm, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m1_m(vm, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m2_m(vm, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t 
rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m4_m(vm, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m8_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vluxseg2ei16.c b/auto-generated/bfloat16/llvm-api-tests/vluxseg2ei16.c new file mode 100644 index 000000000..ada9b0f4a --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vluxseg2ei16.c @@ -0,0 +1,60 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf4x2(rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf2x2(rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m1x2(rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m2x2(rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m4x2(rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf4x2_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf2x2_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m1x2_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m2x2_m(vm, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m4x2_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vluxseg3ei16.c b/auto-generated/bfloat16/llvm-api-tests/vluxseg3ei16.c new file mode 100644 index 000000000..8e8fa7459 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vluxseg3ei16.c @@ -0,0 +1,50 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf4x3(rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf2x3(rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m1x3(rs1, 
rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m2x3(rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf4x3_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf2x3_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m1x3_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m2x3_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vluxseg4ei16.c b/auto-generated/bfloat16/llvm-api-tests/vluxseg4ei16.c new file mode 100644 index 000000000..e8721c4dc --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vluxseg4ei16.c @@ -0,0 +1,50 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf4x4(rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf2x4(rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m1x4(rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m2x4(rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf4x4_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf2x4_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m1x4_m(vm, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m2x4_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vluxseg5ei16.c b/auto-generated/bfloat16/llvm-api-tests/vluxseg5ei16.c new file mode 100644 index 000000000..7251bd57d --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vluxseg5ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg5ei16_v_bf16mf4x5(rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf2x5(rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_v_bf16m1x5(rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf4x5_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf2x5_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16m1x5_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vluxseg6ei16.c b/auto-generated/bfloat16/llvm-api-tests/vluxseg6ei16.c new file mode 100644 index 000000000..d1bc20059 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vluxseg6ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf4x6(rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf2x6(rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_v_bf16m1x6(rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf4x6_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf2x6_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16m1x6_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vluxseg7ei16.c b/auto-generated/bfloat16/llvm-api-tests/vluxseg7ei16.c new file mode 100644 index 000000000..8976004be --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vluxseg7ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf4x7(rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf2x7(rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return 
__riscv_vluxseg7ei16_v_bf16m1x7(rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf4x7_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf2x7_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16m1x7_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vluxseg8ei16.c b/auto-generated/bfloat16/llvm-api-tests/vluxseg8ei16.c new file mode 100644 index 000000000..035146ca5 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vluxseg8ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf4x8(rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf2x8(rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_v_bf16m1x8(rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf4x8_m(vm, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf2x8_m(vm, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16m1x8_m(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vmerge.c b/auto-generated/bfloat16/llvm-api-tests/vmerge.c new file mode 100644 index 000000000..c2962bf98 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vmerge.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4_t test_vmerge_vvm_bf16mf4(vbfloat16mf4_t vs2, vbfloat16mf4_t vs1, + vbool64_t v0, size_t vl) { + return __riscv_vmerge_vvm_bf16mf4(vs2, vs1, v0, vl); +} + +vbfloat16mf2_t test_vmerge_vvm_bf16mf2(vbfloat16mf2_t vs2, vbfloat16mf2_t vs1, + vbool32_t v0, size_t vl) { + return __riscv_vmerge_vvm_bf16mf2(vs2, vs1, v0, vl); +} + +vbfloat16m1_t test_vmerge_vvm_bf16m1(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + vbool16_t v0, size_t vl) { + return __riscv_vmerge_vvm_bf16m1(vs2, vs1, v0, vl); +} + +vbfloat16m2_t test_vmerge_vvm_bf16m2(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + vbool8_t v0, size_t vl) { + return __riscv_vmerge_vvm_bf16m2(vs2, vs1, v0, vl); +} + +vbfloat16m4_t test_vmerge_vvm_bf16m4(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + vbool4_t v0, size_t vl) { + return 
__riscv_vmerge_vvm_bf16m4(vs2, vs1, v0, vl); +} + +vbfloat16m8_t test_vmerge_vvm_bf16m8(vbfloat16m8_t vs2, vbfloat16m8_t vs1, + vbool2_t v0, size_t vl) { + return __riscv_vmerge_vvm_bf16m8(vs2, vs1, v0, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vmv.c b/auto-generated/bfloat16/llvm-api-tests/vmv.c new file mode 100644 index 000000000..b0b0f2bd7 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vmv.c @@ -0,0 +1,32 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4_t test_vmv_v_v_bf16mf4(vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vmv_v_v_bf16mf4(vs1, vl); +} + +vbfloat16mf2_t test_vmv_v_v_bf16mf2(vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vmv_v_v_bf16mf2(vs1, vl); +} + +vbfloat16m1_t test_vmv_v_v_bf16m1(vbfloat16m1_t vs1, size_t vl) { + return __riscv_vmv_v_v_bf16m1(vs1, vl); +} + +vbfloat16m2_t test_vmv_v_v_bf16m2(vbfloat16m2_t vs1, size_t vl) { + return __riscv_vmv_v_v_bf16m2(vs1, vl); +} + +vbfloat16m4_t test_vmv_v_v_bf16m4(vbfloat16m4_t vs1, size_t vl) { + return __riscv_vmv_v_v_bf16m4(vs1, vl); +} + +vbfloat16m8_t test_vmv_v_v_bf16m8(vbfloat16m8_t vs1, size_t vl) { + return __riscv_vmv_v_v_bf16m8(vs1, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vreinterpret.c b/auto-generated/bfloat16/llvm-api-tests/vreinterpret.c new file mode 100644 index 000000000..1ea35ebe4 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vreinterpret.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4_t test_vreinterpret_v_i16mf4_bf16mf4(vint16mf4_t src) { + return __riscv_vreinterpret_v_i16mf4_bf16mf4(src); +} + +vbfloat16mf2_t test_vreinterpret_v_i16mf2_bf16mf2(vint16mf2_t src) { + return __riscv_vreinterpret_v_i16mf2_bf16mf2(src); +} + +vbfloat16m1_t test_vreinterpret_v_i16m1_bf16m1(vint16m1_t src) { + return __riscv_vreinterpret_v_i16m1_bf16m1(src); +} + +vbfloat16m2_t test_vreinterpret_v_i16m2_bf16m2(vint16m2_t src) { + return __riscv_vreinterpret_v_i16m2_bf16m2(src); +} + +vbfloat16m4_t test_vreinterpret_v_i16m4_bf16m4(vint16m4_t src) { + return __riscv_vreinterpret_v_i16m4_bf16m4(src); +} + +vbfloat16m8_t test_vreinterpret_v_i16m8_bf16m8(vint16m8_t src) { + return __riscv_vreinterpret_v_i16m8_bf16m8(src); +} + +vbfloat16mf4_t test_vreinterpret_v_u16mf4_bf16mf4(vuint16mf4_t src) { + return __riscv_vreinterpret_v_u16mf4_bf16mf4(src); +} + +vbfloat16mf2_t test_vreinterpret_v_u16mf2_bf16mf2(vuint16mf2_t src) { + return __riscv_vreinterpret_v_u16mf2_bf16mf2(src); +} + +vbfloat16m1_t test_vreinterpret_v_u16m1_bf16m1(vuint16m1_t src) { + return __riscv_vreinterpret_v_u16m1_bf16m1(src); +} + +vbfloat16m2_t test_vreinterpret_v_u16m2_bf16m2(vuint16m2_t src) { + return __riscv_vreinterpret_v_u16m2_bf16m2(src); +} + +vbfloat16m4_t test_vreinterpret_v_u16m4_bf16m4(vuint16m4_t src) { + return __riscv_vreinterpret_v_u16m4_bf16m4(src); +} + +vbfloat16m8_t test_vreinterpret_v_u16m8_bf16m8(vuint16m8_t src) { + return __riscv_vreinterpret_v_u16m8_bf16m8(src); +} + +vint16mf4_t 
test_vreinterpret_v_bf16mf4_i16mf4(vbfloat16mf4_t src) { + return __riscv_vreinterpret_v_bf16mf4_i16mf4(src); +} + +vint16mf2_t test_vreinterpret_v_bf16mf2_i16mf2(vbfloat16mf2_t src) { + return __riscv_vreinterpret_v_bf16mf2_i16mf2(src); +} + +vint16m1_t test_vreinterpret_v_bf16m1_i16m1(vbfloat16m1_t src) { + return __riscv_vreinterpret_v_bf16m1_i16m1(src); +} + +vint16m2_t test_vreinterpret_v_bf16m2_i16m2(vbfloat16m2_t src) { + return __riscv_vreinterpret_v_bf16m2_i16m2(src); +} + +vint16m4_t test_vreinterpret_v_bf16m4_i16m4(vbfloat16m4_t src) { + return __riscv_vreinterpret_v_bf16m4_i16m4(src); +} + +vint16m8_t test_vreinterpret_v_bf16m8_i16m8(vbfloat16m8_t src) { + return __riscv_vreinterpret_v_bf16m8_i16m8(src); +} + +vuint16mf4_t test_vreinterpret_v_bf16mf4_u16mf4(vbfloat16mf4_t src) { + return __riscv_vreinterpret_v_bf16mf4_u16mf4(src); +} + +vuint16mf2_t test_vreinterpret_v_bf16mf2_u16mf2(vbfloat16mf2_t src) { + return __riscv_vreinterpret_v_bf16mf2_u16mf2(src); +} + +vuint16m1_t test_vreinterpret_v_bf16m1_u16m1(vbfloat16m1_t src) { + return __riscv_vreinterpret_v_bf16m1_u16m1(src); +} + +vuint16m2_t test_vreinterpret_v_bf16m2_u16m2(vbfloat16m2_t src) { + return __riscv_vreinterpret_v_bf16m2_u16m2(src); +} + +vuint16m4_t test_vreinterpret_v_bf16m4_u16m4(vbfloat16m4_t src) { + return __riscv_vreinterpret_v_bf16m4_u16m4(src); +} + +vuint16m8_t test_vreinterpret_v_bf16m8_u16m8(vbfloat16m8_t src) { + return __riscv_vreinterpret_v_bf16m8_u16m8(src); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vse16.c b/auto-generated/bfloat16/llvm-api-tests/vse16.c new file mode 100644 index 000000000..ef8439b13 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vse16.c @@ -0,0 +1,62 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vse16_v_bf16mf4(__bf16 *rs1, vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vse16_v_bf16mf4(rs1, vs3, vl); +} + +void test_vse16_v_bf16mf2(__bf16 *rs1, vbfloat16mf2_t vs3, size_t vl) { + return __riscv_vse16_v_bf16mf2(rs1, vs3, vl); +} + +void test_vse16_v_bf16m1(__bf16 *rs1, vbfloat16m1_t vs3, size_t vl) { + return __riscv_vse16_v_bf16m1(rs1, vs3, vl); +} + +void test_vse16_v_bf16m2(__bf16 *rs1, vbfloat16m2_t vs3, size_t vl) { + return __riscv_vse16_v_bf16m2(rs1, vs3, vl); +} + +void test_vse16_v_bf16m4(__bf16 *rs1, vbfloat16m4_t vs3, size_t vl) { + return __riscv_vse16_v_bf16m4(rs1, vs3, vl); +} + +void test_vse16_v_bf16m8(__bf16 *rs1, vbfloat16m8_t vs3, size_t vl) { + return __riscv_vse16_v_bf16m8(rs1, vs3, vl); +} + +void test_vse16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vse16_v_bf16mf4_m(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vse16_v_bf16mf2_m(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vse16_v_bf16m1_m(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vse16_v_bf16m2_m(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vse16_v_bf16m4_m(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m8_m(vbool2_t vm, 
__bf16 *rs1, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vse16_v_bf16m8_m(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vset.c b/auto-generated/bfloat16/llvm-api-tests/vset.c new file mode 100644 index 000000000..34eb99083 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vset.c @@ -0,0 +1,177 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16m2_t test_vset_v_bf16m1_bf16m2(vbfloat16m2_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m2(dest, 0, value); +} + +vbfloat16m4_t test_vset_v_bf16m1_bf16m4(vbfloat16m4_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m4(dest, 0, value); +} + +vbfloat16m4_t test_vset_v_bf16m2_bf16m4(vbfloat16m4_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset_v_bf16m2_bf16m4(dest, 0, value); +} + +vbfloat16m8_t test_vset_v_bf16m1_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m8(dest, 0, value); +} + +vbfloat16m8_t test_vset_v_bf16m2_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset_v_bf16m2_bf16m8(dest, 0, value); +} + +vbfloat16m8_t test_vset_v_bf16m4_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m4_t value) { + return __riscv_vset_v_bf16m4_bf16m8(dest, 0, value); +} + +vbfloat16mf4x2_t test_vset_v_bf16mf4_bf16mf4x2(vbfloat16mf4x2_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x2(dest, 0, value); +} + +vbfloat16mf4x3_t test_vset_v_bf16mf4_bf16mf4x3(vbfloat16mf4x3_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x3(dest, 0, value); +} + +vbfloat16mf4x4_t test_vset_v_bf16mf4_bf16mf4x4(vbfloat16mf4x4_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x4(dest, 0, value); +} + +vbfloat16mf4x5_t test_vset_v_bf16mf4_bf16mf4x5(vbfloat16mf4x5_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x5(dest, 0, value); +} + +vbfloat16mf4x6_t test_vset_v_bf16mf4_bf16mf4x6(vbfloat16mf4x6_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x6(dest, 0, value); +} + +vbfloat16mf4x7_t test_vset_v_bf16mf4_bf16mf4x7(vbfloat16mf4x7_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x7(dest, 0, value); +} + +vbfloat16mf4x8_t test_vset_v_bf16mf4_bf16mf4x8(vbfloat16mf4x8_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset_v_bf16mf4_bf16mf4x8(dest, 0, value); +} + +vbfloat16mf2x2_t test_vset_v_bf16mf2_bf16mf2x2(vbfloat16mf2x2_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x2(dest, 0, value); +} + +vbfloat16mf2x3_t test_vset_v_bf16mf2_bf16mf2x3(vbfloat16mf2x3_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x3(dest, 0, value); +} + +vbfloat16mf2x4_t test_vset_v_bf16mf2_bf16mf2x4(vbfloat16mf2x4_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x4(dest, 0, value); +} + +vbfloat16mf2x5_t test_vset_v_bf16mf2_bf16mf2x5(vbfloat16mf2x5_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x5(dest, 0, value); +} 
+ +vbfloat16mf2x6_t test_vset_v_bf16mf2_bf16mf2x6(vbfloat16mf2x6_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x6(dest, 0, value); +} + +vbfloat16mf2x7_t test_vset_v_bf16mf2_bf16mf2x7(vbfloat16mf2x7_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x7(dest, 0, value); +} + +vbfloat16mf2x8_t test_vset_v_bf16mf2_bf16mf2x8(vbfloat16mf2x8_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset_v_bf16mf2_bf16mf2x8(dest, 0, value); +} + +vbfloat16m1x2_t test_vset_v_bf16m1_bf16m1x2(vbfloat16m1x2_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x2(dest, 0, value); +} + +vbfloat16m1x3_t test_vset_v_bf16m1_bf16m1x3(vbfloat16m1x3_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x3(dest, 0, value); +} + +vbfloat16m1x4_t test_vset_v_bf16m1_bf16m1x4(vbfloat16m1x4_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x4(dest, 0, value); +} + +vbfloat16m1x5_t test_vset_v_bf16m1_bf16m1x5(vbfloat16m1x5_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x5(dest, 0, value); +} + +vbfloat16m1x6_t test_vset_v_bf16m1_bf16m1x6(vbfloat16m1x6_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x6(dest, 0, value); +} + +vbfloat16m1x7_t test_vset_v_bf16m1_bf16m1x7(vbfloat16m1x7_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x7(dest, 0, value); +} + +vbfloat16m1x8_t test_vset_v_bf16m1_bf16m1x8(vbfloat16m1x8_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset_v_bf16m1_bf16m1x8(dest, 0, value); +} + +vbfloat16m2x2_t test_vset_v_bf16m2_bf16m2x2(vbfloat16m2x2_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset_v_bf16m2_bf16m2x2(dest, 0, value); +} + +vbfloat16m2x3_t test_vset_v_bf16m2_bf16m2x3(vbfloat16m2x3_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset_v_bf16m2_bf16m2x3(dest, 0, value); +} + +vbfloat16m2x4_t test_vset_v_bf16m2_bf16m2x4(vbfloat16m2x4_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset_v_bf16m2_bf16m2x4(dest, 0, value); +} + +vbfloat16m4x2_t test_vset_v_bf16m4_bf16m4x2(vbfloat16m4x2_t dest, size_t index, + vbfloat16m4_t value) { + return __riscv_vset_v_bf16m4_bf16m4x2(dest, 0, value); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vsoxei16.c b/auto-generated/bfloat16/llvm-api-tests/vsoxei16.c new file mode 100644 index 000000000..f68526908 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vsoxei16.c @@ -0,0 +1,68 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsoxei16_v_bf16mf4(__bf16 *rs1, vuint16mf4_t rs2, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vsoxei16_v_bf16mf4(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16mf2(__bf16 *rs1, vuint16mf2_t rs2, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vsoxei16_v_bf16mf2(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m1(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vsoxei16_v_bf16m1(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m2(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vsoxei16_v_bf16m2(rs1, rs2, vs3, vl); +} 
+ +void test_vsoxei16_v_bf16m4(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vsoxei16_v_bf16m4(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m8(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vsoxei16_v_bf16m8(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_bf16mf4_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_bf16mf2_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl) { + return __riscv_vsoxei16_v_bf16m1_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl) { + return __riscv_vsoxei16_v_bf16m2_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl) { + return __riscv_vsoxei16_v_bf16m4_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t vl) { + return __riscv_vsoxei16_v_bf16m8_m(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vsoxseg2ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsoxseg2ei16.c new file mode 100644 index 000000000..0fc04e2f9 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vsoxseg2ei16.c @@ -0,0 +1,60 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_bf16mf4x2(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_bf16mf2x2(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_bf16m1x2(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_bf16m2x2(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_bf16m4x2(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl) { + return __riscv_vsoxseg2ei16_v_bf16mf4x2_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl) { + return __riscv_vsoxseg2ei16_v_bf16mf2x2_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_bf16m1x2_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_bf16m2x2_m(vm, rs1, vs2, vs3, vl); +} + +void 
test_vsoxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16_v_bf16m4x2_m(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vsoxseg3ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsoxseg3ei16.c new file mode 100644 index 000000000..fc7f7d9a0 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vsoxseg3ei16.c @@ -0,0 +1,50 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_bf16mf4x3(rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_bf16mf2x3(rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_bf16m1x3(rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_bf16m2x3(rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl) { + return __riscv_vsoxseg3ei16_v_bf16mf4x3_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl) { + return __riscv_vsoxseg3ei16_v_bf16mf2x3_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_bf16m1x3_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16_v_bf16m2x3_m(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vsoxseg4ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsoxseg4ei16.c new file mode 100644 index 000000000..f1afd3b0b --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vsoxseg4ei16.c @@ -0,0 +1,50 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16_v_bf16mf4x4(rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16_v_bf16mf2x4(rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16_v_bf16m1x4(rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16_v_bf16m2x4(rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t 
vl) { + return __riscv_vsoxseg4ei16_v_bf16mf4x4_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl) { + return __riscv_vsoxseg4ei16_v_bf16mf2x4_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16_v_bf16m1x4_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16_v_bf16m2x4_m(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vsoxseg5ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsoxseg5ei16.c new file mode 100644 index 000000000..01a77f055 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vsoxseg5ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_bf16mf4x5(rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_bf16mf2x5(rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_bf16m1x5(rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl) { + return __riscv_vsoxseg5ei16_v_bf16mf4x5_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x5_t vs3, + size_t vl) { + return __riscv_vsoxseg5ei16_v_bf16mf2x5_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16_v_bf16m1x5_m(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vsoxseg6ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsoxseg6ei16.c new file mode 100644 index 000000000..c49cd7df9 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vsoxseg6ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16_v_bf16mf4x6(rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16_v_bf16mf2x6(rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16_v_bf16m1x6(rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl) { + return __riscv_vsoxseg6ei16_v_bf16mf4x6_m(vm, 
rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl) { + return __riscv_vsoxseg6ei16_v_bf16mf2x6_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16_v_bf16m1x6_m(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vsoxseg7ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsoxseg7ei16.c new file mode 100644 index 000000000..b48bf2b3b --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vsoxseg7ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_bf16mf4x7(rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_bf16mf2x7(rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_bf16m1x7(rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl) { + return __riscv_vsoxseg7ei16_v_bf16mf4x7_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl) { + return __riscv_vsoxseg7ei16_v_bf16mf2x7_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16_v_bf16m1x7_m(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vsoxseg8ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsoxseg8ei16.c new file mode 100644 index 000000000..b8157f46e --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vsoxseg8ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_bf16mf4x8(rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_bf16mf2x8(rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_bf16m1x8(rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl) { + return __riscv_vsoxseg8ei16_v_bf16mf4x8_m(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl) { + return __riscv_vsoxseg8ei16_v_bf16mf2x8_m(vm, rs1, vs2, vs3, vl); +} + +void 
test_vsoxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16_v_bf16m1x8_m(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vsse16.c b/auto-generated/bfloat16/llvm-api-tests/vsse16.c new file mode 100644 index 000000000..a202494b7 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vsse16.c @@ -0,0 +1,68 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsse16_v_bf16mf4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vsse16_v_bf16mf4(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16mf2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vsse16_v_bf16mf2(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m1(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vsse16_v_bf16m1(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vsse16_v_bf16m2(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vsse16_v_bf16m4(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m8(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vsse16_v_bf16m8(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vsse16_v_bf16mf4_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2_t vs3, size_t vl) { + return __riscv_vsse16_v_bf16mf2_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1_t vs3, size_t vl) { + return __riscv_vsse16_v_bf16m1_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2_t vs3, size_t vl) { + return __riscv_vsse16_v_bf16m2_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m4_t vs3, size_t vl) { + return __riscv_vsse16_v_bf16m4_m(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m8_t vs3, size_t vl) { + return __riscv_vsse16_v_bf16m8_m(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vsseg2e16.c b/auto-generated/bfloat16/llvm-api-tests/vsseg2e16.c new file mode 100644 index 000000000..ed102d645 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vsseg2e16.c @@ -0,0 +1,53 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsseg2e16_v_bf16mf4x2(__bf16 *rs1, vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16_v_bf16mf4x2(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16mf2x2(__bf16 *rs1, vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16_v_bf16mf2x2(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m1x2(__bf16 *rs1, vbfloat16m1x2_t vs3, size_t vl) 
{ + return __riscv_vsseg2e16_v_bf16m1x2(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m2x2(__bf16 *rs1, vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16_v_bf16m2x2(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m4x2(__bf16 *rs1, vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16_v_bf16m4x2(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16_v_bf16mf4x2_m(vm, rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16_v_bf16mf2x2_m(vm, rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x2_t vs3, + size_t vl) { + return __riscv_vsseg2e16_v_bf16m1x2_m(vm, rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x2_t vs3, + size_t vl) { + return __riscv_vsseg2e16_v_bf16m2x2_m(vm, rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vbfloat16m4x2_t vs3, + size_t vl) { + return __riscv_vsseg2e16_v_bf16m4x2_m(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vsseg3e16.c b/auto-generated/bfloat16/llvm-api-tests/vsseg3e16.c new file mode 100644 index 000000000..b6e0c0949 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vsseg3e16.c @@ -0,0 +1,44 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsseg3e16_v_bf16mf4x3(__bf16 *rs1, vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_bf16mf4x3(rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16mf2x3(__bf16 *rs1, vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_bf16mf2x3(rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16m1x3(__bf16 *rs1, vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_bf16m1x3(rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16m2x3(__bf16 *rs1, vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_bf16m2x3(rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_bf16mf4x3_m(vm, rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16_v_bf16mf2x3_m(vm, rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x3_t vs3, + size_t vl) { + return __riscv_vsseg3e16_v_bf16m1x3_m(vm, rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x3_t vs3, + size_t vl) { + return __riscv_vsseg3e16_v_bf16m2x3_m(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-api-tests/vsseg4e16.c b/auto-generated/bfloat16/llvm-api-tests/vsseg4e16.c new file mode 100644 index 000000000..027169c27 --- /dev/null +++ b/auto-generated/bfloat16/llvm-api-tests/vsseg4e16.c @@ -0,0 +1,44 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsseg4e16_v_bf16mf4x4(__bf16 *rs1, vbfloat16mf4x4_t vs3, size_t vl) { + return 
+  return __riscv_vsseg4e16_v_bf16mf4x4(rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16mf2x4(__bf16 *rs1, vbfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsseg4e16_v_bf16mf2x4(rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16m1x4(__bf16 *rs1, vbfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsseg4e16_v_bf16m1x4(rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16m2x4(__bf16 *rs1, vbfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsseg4e16_v_bf16m2x4(rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1,
+                                  vbfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsseg4e16_v_bf16mf4x4_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1,
+                                  vbfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsseg4e16_v_bf16mf2x4_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x4_t vs3,
+                                 size_t vl) {
+  return __riscv_vsseg4e16_v_bf16m1x4_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x4_t vs3,
+                                 size_t vl) {
+  return __riscv_vsseg4e16_v_bf16m2x4_m(vm, rs1, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vsseg5e16.c b/auto-generated/bfloat16/llvm-api-tests/vsseg5e16.c
new file mode 100644
index 000000000..84153c5de
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vsseg5e16.c
@@ -0,0 +1,35 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vsseg5e16_v_bf16mf4x5(__bf16 *rs1, vbfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsseg5e16_v_bf16mf4x5(rs1, vs3, vl);
+}
+
+void test_vsseg5e16_v_bf16mf2x5(__bf16 *rs1, vbfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsseg5e16_v_bf16mf2x5(rs1, vs3, vl);
+}
+
+void test_vsseg5e16_v_bf16m1x5(__bf16 *rs1, vbfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vsseg5e16_v_bf16m1x5(rs1, vs3, vl);
+}
+
+void test_vsseg5e16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1,
+                                  vbfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsseg5e16_v_bf16mf4x5_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg5e16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1,
+                                  vbfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsseg5e16_v_bf16mf2x5_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg5e16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x5_t vs3,
+                                 size_t vl) {
+  return __riscv_vsseg5e16_v_bf16m1x5_m(vm, rs1, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vsseg6e16.c b/auto-generated/bfloat16/llvm-api-tests/vsseg6e16.c
new file mode 100644
index 000000000..42e14cbc3
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vsseg6e16.c
@@ -0,0 +1,35 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vsseg6e16_v_bf16mf4x6(__bf16 *rs1, vbfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsseg6e16_v_bf16mf4x6(rs1, vs3, vl);
+}
+
+void test_vsseg6e16_v_bf16mf2x6(__bf16 *rs1, vbfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsseg6e16_v_bf16mf2x6(rs1, vs3, vl);
+}
+
+void test_vsseg6e16_v_bf16m1x6(__bf16 *rs1, vbfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsseg6e16_v_bf16m1x6(rs1, vs3, vl);
+}
+
+void test_vsseg6e16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1,
+                                  vbfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsseg6e16_v_bf16mf4x6_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg6e16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1,
+                                  vbfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsseg6e16_v_bf16mf2x6_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg6e16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x6_t vs3,
+                                 size_t vl) {
+  return __riscv_vsseg6e16_v_bf16m1x6_m(vm, rs1, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vsseg7e16.c b/auto-generated/bfloat16/llvm-api-tests/vsseg7e16.c
new file mode 100644
index 000000000..994898a13
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vsseg7e16.c
@@ -0,0 +1,35 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vsseg7e16_v_bf16mf4x7(__bf16 *rs1, vbfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsseg7e16_v_bf16mf4x7(rs1, vs3, vl);
+}
+
+void test_vsseg7e16_v_bf16mf2x7(__bf16 *rs1, vbfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsseg7e16_v_bf16mf2x7(rs1, vs3, vl);
+}
+
+void test_vsseg7e16_v_bf16m1x7(__bf16 *rs1, vbfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsseg7e16_v_bf16m1x7(rs1, vs3, vl);
+}
+
+void test_vsseg7e16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1,
+                                  vbfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsseg7e16_v_bf16mf4x7_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg7e16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1,
+                                  vbfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsseg7e16_v_bf16mf2x7_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg7e16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x7_t vs3,
+                                 size_t vl) {
+  return __riscv_vsseg7e16_v_bf16m1x7_m(vm, rs1, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vsseg8e16.c b/auto-generated/bfloat16/llvm-api-tests/vsseg8e16.c
new file mode 100644
index 000000000..a1e32450e
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vsseg8e16.c
@@ -0,0 +1,35 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vsseg8e16_v_bf16mf4x8(__bf16 *rs1, vbfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsseg8e16_v_bf16mf4x8(rs1, vs3, vl);
+}
+
+void test_vsseg8e16_v_bf16mf2x8(__bf16 *rs1, vbfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsseg8e16_v_bf16mf2x8(rs1, vs3, vl);
+}
+
+void test_vsseg8e16_v_bf16m1x8(__bf16 *rs1, vbfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsseg8e16_v_bf16m1x8(rs1, vs3, vl);
+}
+
+void test_vsseg8e16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1,
+                                  vbfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsseg8e16_v_bf16mf4x8_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg8e16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1,
+                                  vbfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsseg8e16_v_bf16mf2x8_m(vm, rs1, vs3, vl);
+}
+
+void test_vsseg8e16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x8_t vs3,
+                                 size_t vl) {
+  return __riscv_vsseg8e16_v_bf16m1x8_m(vm, rs1, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vssseg2e16.c b/auto-generated/bfloat16/llvm-api-tests/vssseg2e16.c
new file mode 100644
index 000000000..e2f83e96a
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vssseg2e16.c
@@ -0,0 +1,58 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vssseg2e16_v_bf16mf4x2(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_bf16mf4x2(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16mf2x2(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_bf16mf2x2(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16m1x2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x2_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg2e16_v_bf16m1x2(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16m2x2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x2_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg2e16_v_bf16m2x2(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16m4x2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4x2_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg2e16_v_bf16m4x2(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_bf16mf4x2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_bf16mf2x2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_bf16m1x2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_bf16m2x2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg2e16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16_v_bf16m4x2_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vssseg3e16.c b/auto-generated/bfloat16/llvm-api-tests/vssseg3e16.c
new file mode 100644
index 000000000..c787bae52
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vssseg3e16.c
@@ -0,0 +1,48 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vssseg3e16_v_bf16mf4x3(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf4x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e16_v_bf16mf4x3(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16mf2x3(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e16_v_bf16mf2x3(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16m1x3(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x3_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg3e16_v_bf16m1x3(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16m2x3(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x3_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg3e16_v_bf16m2x3(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf4x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e16_v_bf16mf4x3_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e16_v_bf16mf2x3_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m1x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e16_v_bf16m1x3_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e16_v_bf16m2x3_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vssseg4e16.c b/auto-generated/bfloat16/llvm-api-tests/vssseg4e16.c
new file mode 100644
index 000000000..a8cd816a0
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vssseg4e16.c
@@ -0,0 +1,48 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vssseg4e16_v_bf16mf4x4(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_bf16mf4x4(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16mf2x4(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_bf16mf2x4(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16m1x4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x4_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg4e16_v_bf16m1x4(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16m2x4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x4_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg4e16_v_bf16m2x4(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_bf16mf4x4_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_bf16mf2x4_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_bf16m1x4_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16_v_bf16m2x4_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vssseg5e16.c b/auto-generated/bfloat16/llvm-api-tests/vssseg5e16.c
new file mode 100644
index 000000000..8f1c52beb
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vssseg5e16.c
@@ -0,0 +1,38 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vssseg5e16_v_bf16mf4x5(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_bf16mf4x5(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16mf2x5(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_bf16mf2x5(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16m1x5(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x5_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg5e16_v_bf16m1x5(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_bf16mf4x5_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_bf16mf2x5_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16_v_bf16m1x5_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vssseg6e16.c b/auto-generated/bfloat16/llvm-api-tests/vssseg6e16.c
new file mode 100644
index 000000000..0ba0d33a7
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vssseg6e16.c
@@ -0,0 +1,38 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vssseg6e16_v_bf16mf4x6(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_bf16mf4x6(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16mf2x6(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_bf16mf2x6(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16m1x6(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x6_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg6e16_v_bf16m1x6(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_bf16mf4x6_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_bf16mf2x6_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16_v_bf16m1x6_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vssseg7e16.c b/auto-generated/bfloat16/llvm-api-tests/vssseg7e16.c
new file mode 100644
index 000000000..4da8d8681
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vssseg7e16.c
@@ -0,0 +1,38 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vssseg7e16_v_bf16mf4x7(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_bf16mf4x7(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16mf2x7(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_bf16mf2x7(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16m1x7(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x7_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg7e16_v_bf16m1x7(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_bf16mf4x7_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_bf16mf2x7_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16_v_bf16m1x7_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vssseg8e16.c b/auto-generated/bfloat16/llvm-api-tests/vssseg8e16.c
new file mode 100644
index 000000000..ed3086975
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vssseg8e16.c
@@ -0,0 +1,38 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vssseg8e16_v_bf16mf4x8(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vssseg8e16_v_bf16mf4x8(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg8e16_v_bf16mf2x8(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vssseg8e16_v_bf16mf2x8(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg8e16_v_bf16m1x8(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x8_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg8e16_v_bf16m1x8(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg8e16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vssseg8e16_v_bf16mf4x8_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg8e16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vssseg8e16_v_bf16mf2x8_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg8e16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vssseg8e16_v_bf16m1x8_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vsuxei16.c b/auto-generated/bfloat16/llvm-api-tests/vsuxei16.c
new file mode 100644
index 000000000..fef398b4d
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vsuxei16.c
@@ -0,0 +1,68 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vsuxei16_v_bf16mf4(__bf16 *rs1, vuint16mf4_t rs2, vbfloat16mf4_t vs3,
+                             size_t vl) {
+  return __riscv_vsuxei16_v_bf16mf4(rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16mf2(__bf16 *rs1, vuint16mf2_t rs2, vbfloat16mf2_t vs3,
+                             size_t vl) {
+  return __riscv_vsuxei16_v_bf16mf2(rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m1(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3,
+                            size_t vl) {
+  return __riscv_vsuxei16_v_bf16m1(rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m2(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3,
+                            size_t vl) {
+  return __riscv_vsuxei16_v_bf16m2(rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m4(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3,
+                            size_t vl) {
+  return __riscv_vsuxei16_v_bf16m4(rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m8(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3,
+                            size_t vl) {
+  return __riscv_vsuxei16_v_bf16m8(rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2,
+                               vbfloat16mf4_t vs3, size_t vl) {
+  return __riscv_vsuxei16_v_bf16mf4_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2,
+                               vbfloat16mf2_t vs3, size_t vl) {
+  return __riscv_vsuxei16_v_bf16mf2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2,
+                              vbfloat16m1_t vs3, size_t vl) {
+  return __riscv_vsuxei16_v_bf16m1_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2,
+                              vbfloat16m2_t vs3, size_t vl) {
+  return __riscv_vsuxei16_v_bf16m2_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2,
+                              vbfloat16m4_t vs3, size_t vl) {
+  return __riscv_vsuxei16_v_bf16m4_m(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vsuxei16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2,
+                              vbfloat16m8_t vs3, size_t vl) {
+  return __riscv_vsuxei16_v_bf16m8_m(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vsuxseg2ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsuxseg2ei16.c
new file mode 100644
index 000000000..5e0b0c230
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vsuxseg2ei16.c
@@ -0,0 +1,60 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2,
+                                   vbfloat16mf4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16_v_bf16mf4x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2,
+                                   vbfloat16mf2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16_v_bf16mf2x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2,
+                                  vbfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16_v_bf16m1x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2,
+                                  vbfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16_v_bf16m2x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2,
+                                  vbfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16_v_bf16m4x2(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1,
+                                     vuint16mf4_t vs2, vbfloat16mf4x2_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg2ei16_v_bf16mf4x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1,
+                                     vuint16mf2_t vs2, vbfloat16mf2x2_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg2ei16_v_bf16mf2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+                                    vbfloat16m1x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16_v_bf16m1x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2,
+                                    vbfloat16m2x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16_v_bf16m2x2_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2,
+                                    vbfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vsuxseg2ei16_v_bf16m4x2_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vsuxseg3ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsuxseg3ei16.c
new file mode 100644
index 000000000..d85ff55de
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vsuxseg3ei16.c
@@ -0,0 +1,50 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2,
+                                   vbfloat16mf4x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_bf16mf4x3(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2,
+                                   vbfloat16mf2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_bf16mf2x3(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2,
+                                  vbfloat16m1x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_bf16m1x3(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2,
+                                  vbfloat16m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_bf16m2x3(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1,
+                                     vuint16mf4_t vs2, vbfloat16mf4x3_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg3ei16_v_bf16mf4x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1,
+                                     vuint16mf2_t vs2, vbfloat16mf2x3_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg3ei16_v_bf16mf2x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+                                    vbfloat16m1x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_bf16m1x3_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2,
+                                    vbfloat16m2x3_t vs3, size_t vl) {
+  return __riscv_vsuxseg3ei16_v_bf16m2x3_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vsuxseg4ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsuxseg4ei16.c
new file mode 100644
index 000000000..24c1dd215
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vsuxseg4ei16.c
@@ -0,0 +1,50 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2,
+                                   vbfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_bf16mf4x4(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2,
+                                   vbfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_bf16mf2x4(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2,
+                                  vbfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_bf16m1x4(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2,
+                                  vbfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_bf16m2x4(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1,
+                                     vuint16mf4_t vs2, vbfloat16mf4x4_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg4ei16_v_bf16mf4x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1,
+                                     vuint16mf2_t vs2, vbfloat16mf2x4_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg4ei16_v_bf16mf2x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+                                    vbfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_bf16m1x4_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2,
+                                    vbfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vsuxseg4ei16_v_bf16m2x4_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vsuxseg5ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsuxseg5ei16.c
new file mode 100644
index 000000000..826f4408c
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vsuxseg5ei16.c
@@ -0,0 +1,40 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2,
+                                   vbfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16_v_bf16mf4x5(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2,
+                                   vbfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16_v_bf16mf2x5(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2,
+                                  vbfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16_v_bf16m1x5(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1,
+                                     vuint16mf4_t vs2, vbfloat16mf4x5_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg5ei16_v_bf16mf4x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1,
+                                     vuint16mf2_t vs2, vbfloat16mf2x5_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg5ei16_v_bf16mf2x5_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+                                    vbfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vsuxseg5ei16_v_bf16m1x5_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vsuxseg6ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsuxseg6ei16.c
new file mode 100644
index 000000000..c0fbb6b1b
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vsuxseg6ei16.c
@@ -0,0 +1,40 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2,
+                                   vbfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei16_v_bf16mf4x6(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2,
+                                   vbfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei16_v_bf16mf2x6(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2,
+                                  vbfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei16_v_bf16m1x6(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1,
+                                     vuint16mf4_t vs2, vbfloat16mf4x6_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg6ei16_v_bf16mf4x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1,
+                                     vuint16mf2_t vs2, vbfloat16mf2x6_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg6ei16_v_bf16mf2x6_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+                                    vbfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vsuxseg6ei16_v_bf16m1x6_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vsuxseg7ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsuxseg7ei16.c
new file mode 100644
index 000000000..e57536fb8
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vsuxseg7ei16.c
@@ -0,0 +1,40 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2,
+                                   vbfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_bf16mf4x7(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2,
+                                   vbfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_bf16mf2x7(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2,
+                                  vbfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_bf16m1x7(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1,
+                                     vuint16mf4_t vs2, vbfloat16mf4x7_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg7ei16_v_bf16mf4x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1,
+                                     vuint16mf2_t vs2, vbfloat16mf2x7_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg7ei16_v_bf16mf2x7_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+                                    vbfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vsuxseg7ei16_v_bf16m1x7_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vsuxseg8ei16.c b/auto-generated/bfloat16/llvm-api-tests/vsuxseg8ei16.c
new file mode 100644
index 000000000..39b3182fd
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vsuxseg8ei16.c
@@ -0,0 +1,40 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+void test_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2,
+                                   vbfloat16mf4x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_bf16mf4x8(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2,
+                                   vbfloat16mf2x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_bf16mf2x8(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2,
+                                  vbfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_bf16m1x8(rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1,
+                                     vuint16mf4_t vs2, vbfloat16mf4x8_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg8ei16_v_bf16mf4x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1,
+                                     vuint16mf2_t vs2, vbfloat16mf2x8_t vs3,
+                                     size_t vl) {
+  return __riscv_vsuxseg8ei16_v_bf16mf2x8_m(vm, rs1, vs2, vs3, vl);
+}
+
+void test_vsuxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2,
+                                    vbfloat16m1x8_t vs3, size_t vl) {
+  return __riscv_vsuxseg8ei16_v_bf16m1x8_m(vm, rs1, vs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-api-tests/vundefined.c b/auto-generated/bfloat16/llvm-api-tests/vundefined.c
new file mode 100644
index 000000000..05d5adb95
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-api-tests/vundefined.c
@@ -0,0 +1,124 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vundefined_bf16mf4() {
+  return __riscv_vundefined_bf16mf4();
+}
+
+vbfloat16mf2_t test_vundefined_bf16mf2() {
+  return __riscv_vundefined_bf16mf2();
+}
+
+vbfloat16m1_t test_vundefined_bf16m1() { return __riscv_vundefined_bf16m1(); }
+
+vbfloat16m2_t test_vundefined_bf16m2() { return __riscv_vundefined_bf16m2(); }
+
+vbfloat16m4_t test_vundefined_bf16m4() { return __riscv_vundefined_bf16m4(); }
+
+vbfloat16m8_t test_vundefined_bf16m8() { return __riscv_vundefined_bf16m8(); }
+
+vbfloat16mf4x2_t test_vundefined_bf16mf4x2() {
+  return __riscv_vundefined_bf16mf4x2();
+}
+
+vbfloat16mf4x3_t test_vundefined_bf16mf4x3() {
+  return __riscv_vundefined_bf16mf4x3();
+}
+
+vbfloat16mf4x4_t test_vundefined_bf16mf4x4() {
+  return __riscv_vundefined_bf16mf4x4();
+}
+
+vbfloat16mf4x5_t test_vundefined_bf16mf4x5() {
+  return __riscv_vundefined_bf16mf4x5();
+}
+
+vbfloat16mf4x6_t test_vundefined_bf16mf4x6() {
+  return __riscv_vundefined_bf16mf4x6();
+}
+
+vbfloat16mf4x7_t test_vundefined_bf16mf4x7() {
+  return __riscv_vundefined_bf16mf4x7();
+}
+
+vbfloat16mf4x8_t test_vundefined_bf16mf4x8() {
+  return __riscv_vundefined_bf16mf4x8();
+}
+
+vbfloat16mf2x2_t test_vundefined_bf16mf2x2() {
+  return __riscv_vundefined_bf16mf2x2();
+}
+
+vbfloat16mf2x3_t test_vundefined_bf16mf2x3() {
+  return __riscv_vundefined_bf16mf2x3();
+}
+
+vbfloat16mf2x4_t test_vundefined_bf16mf2x4() {
+  return __riscv_vundefined_bf16mf2x4();
+}
+
+vbfloat16mf2x5_t test_vundefined_bf16mf2x5() {
+  return __riscv_vundefined_bf16mf2x5();
+}
+
+vbfloat16mf2x6_t test_vundefined_bf16mf2x6() {
+  return __riscv_vundefined_bf16mf2x6();
+}
+
+vbfloat16mf2x7_t test_vundefined_bf16mf2x7() {
+  return __riscv_vundefined_bf16mf2x7();
+}
+
+vbfloat16mf2x8_t test_vundefined_bf16mf2x8() {
+  return __riscv_vundefined_bf16mf2x8();
+}
+
+vbfloat16m1x2_t test_vundefined_bf16m1x2() {
+  return __riscv_vundefined_bf16m1x2();
+}
+
+vbfloat16m1x3_t test_vundefined_bf16m1x3() {
+  return __riscv_vundefined_bf16m1x3();
+}
+
+vbfloat16m1x4_t test_vundefined_bf16m1x4() {
+  return __riscv_vundefined_bf16m1x4();
+}
+
+vbfloat16m1x5_t test_vundefined_bf16m1x5() {
+  return __riscv_vundefined_bf16m1x5();
+}
+
+vbfloat16m1x6_t test_vundefined_bf16m1x6() {
+  return __riscv_vundefined_bf16m1x6();
+}
+
+vbfloat16m1x7_t test_vundefined_bf16m1x7() {
+  return __riscv_vundefined_bf16m1x7();
+}
+
+vbfloat16m1x8_t test_vundefined_bf16m1x8() {
+  return __riscv_vundefined_bf16m1x8();
+}
+
+vbfloat16m2x2_t test_vundefined_bf16m2x2() {
+  return __riscv_vundefined_bf16m2x2();
+}
+
+vbfloat16m2x3_t test_vundefined_bf16m2x3() {
+  return __riscv_vundefined_bf16m2x3();
+}
+
+vbfloat16m2x4_t test_vundefined_bf16m2x4() {
+  return __riscv_vundefined_bf16m2x4();
+}
+
+vbfloat16m4x2_t test_vundefined_bf16m4x2() {
+  return __riscv_vundefined_bf16m4x2();
+}
diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vfncvtbf16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vfncvtbf16.c
new file mode 100644
index 000000000..91ddb1751
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-overloaded-tests/vfncvtbf16.c
@@ -0,0 +1,98 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f(vs2, vl);
+}
+
+vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f(vs2, vl);
+}
+
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f(vs2, vl);
+}
+
+vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f(vs2, vl);
+}
+
+vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f(vs2, vl);
+}
+
+vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                               size_t vl) {
+  return __riscv_vfncvtbf16_f(vm, vs2, vl);
+}
+
+vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+                                               size_t vl) {
+  return __riscv_vfncvtbf16_f(vm, vs2, vl);
+}
+
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+                                             size_t vl) {
+  return __riscv_vfncvtbf16_f(vm, vs2, vl);
+}
+
+vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+                                             size_t vl) {
+  return __riscv_vfncvtbf16_f(vm, vs2, vl);
+}
+
+vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+                                             size_t vl) {
+  return __riscv_vfncvtbf16_f(vm, vs2, vl);
+}
+
+vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16mf4_t
+test_vfncvtbf16_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_m(vbool32_t vm,
+                                                  vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2,
+                                                size_t vl) {
+  return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+                                                size_t vl) {
+  return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+                                                size_t vl) {
+  return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vfwcvtbf16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vfwcvtbf16.c
new file mode 100644
index 000000000..cf1c7f9de
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-overloaded-tests/vfwcvtbf16.c
@@ -0,0 +1,53 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvtbf16_f(vs2, vl);
+}
+
+vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1(vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvtbf16_f(vs2, vl);
+}
+
+vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2(vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwcvtbf16_f(vs2, vl);
+}
+
+vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4(vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwcvtbf16_f(vs2, vl);
+}
+
+vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8(vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwcvtbf16_f(vs2, vl);
+}
+
+vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2,
+                                             size_t vl) {
+  return __riscv_vfwcvtbf16_f(vm, vs2, vl);
+}
+
+vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2,
+                                           size_t vl) {
+  return __riscv_vfwcvtbf16_f(vm, vs2, vl);
+}
+
+vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2,
+                                           size_t vl) {
+  return __riscv_vfwcvtbf16_f(vm, vs2, vl);
+}
+
+vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2,
+                                           size_t vl) {
+  return __riscv_vfwcvtbf16_f(vm, vs2, vl);
+}
+
+vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2,
+                                           size_t vl) {
+  return __riscv_vfwcvtbf16_f(vm, vs2, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vfwmaccbf16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vfwmaccbf16.c
new file mode 100644
index 000000000..1cc4b64ba
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-overloaded-tests/vfwmaccbf16.c
@@ -0,0 +1,229 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2(vfloat32mf2_t vd, vbfloat16mf4_t vs1,
+                                         vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, vl);
+}
+
+vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+                                         vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vv_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+                                       vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1(vfloat32m1_t vd, __bf16 vs1,
+                                       vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vv_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+                                       vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vf_f32m2(vfloat32m2_t vd, __bf16 vs1,
+                                       vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vv_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1,
+                                       vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vf_f32m4(vfloat32m4_t vd, __bf16 vs1,
+                                       vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vv_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1,
+                                       vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vf_f32m8(vfloat32m8_t vd, __bf16 vs1,
+                                       vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, vl);
+}
+
+vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+                                           vbfloat16mf4_t vs1,
+                                           vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd,
+                                           __bf16 vs1, vbfloat16mf4_t vs2,
+                                           size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+                                         vbfloat16mf2_t vs1, vbfloat16mf2_t vs2,
+                                         size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd,
+                                         __bf16 vs1, vbfloat16mf2_t vs2,
+                                         size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+                                         vbfloat16m1_t vs1, vbfloat16m1_t vs2,
+                                         size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd,
+                                         __bf16 vs1, vbfloat16m1_t vs2,
+                                         size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+                                         vbfloat16m2_t vs1, vbfloat16m2_t vs2,
+                                         size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd,
+                                         __bf16 vs1, vbfloat16m2_t vs2,
+                                         size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+                                         vbfloat16m4_t vs1, vbfloat16m4_t vs2,
+                                         size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd,
+                                         __bf16 vs1, vbfloat16m4_t vs2,
+                                         size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl);
+}
+
+vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm(vfloat32mf2_t vd,
+                                            vbfloat16mf4_t vs1,
+                                            vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1,
+                                            vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+                                          vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm(vfloat32m1_t vd, __bf16 vs1,
+                                          vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1,
+                                          vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm(vfloat32m2_t vd, __bf16 vs1,
+                                          vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1,
+                                          vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm(vfloat32m4_t vd, __bf16 vs1,
+                                          vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1,
+                                          vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm(vfloat32m8_t vd, __bf16 vs1,
+                                          vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32mf2_t
+test_vfwmaccbf16_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd,
+                                vbfloat16mf4_t vs1,
+                                vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd,
+                                              __bf16 vs1, vbfloat16mf4_t vs2,
+                                              size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+                                            vbfloat16mf2_t vs1,
+                                            vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd,
+                                            __bf16 vs1, vbfloat16mf2_t vs2,
+                                            size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+                                            vbfloat16m1_t vs1,
+                                            vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+                                            __bf16 vs1, vbfloat16m1_t vs2,
+                                            size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+                                            vbfloat16m2_t vs1,
+                                            vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+                                            __bf16 vs1, vbfloat16m2_t vs2,
+                                            size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+                                            vbfloat16m4_t vs1,
+                                            vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+                                            __bf16 vs1, vbfloat16m4_t vs2,
+                                            size_t vl) {
+  return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vget.c b/auto-generated/bfloat16/llvm-overloaded-tests/vget.c
new file mode 100644
index 000000000..f208bb712
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-overloaded-tests/vget.c
@@ -0,0 +1,146 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16m1_t test_vget_v_bf16m2_bf16m1(vbfloat16m2_t src, size_t index) {
+  return __riscv_vget_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m4_bf16m1(vbfloat16m4_t src, size_t index) {
+  return __riscv_vget_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m8_bf16m1(vbfloat16m8_t src, size_t index) {
+  return __riscv_vget_bf16m1(src, 0);
+}
+
+vbfloat16m2_t test_vget_v_bf16m4_bf16m2(vbfloat16m4_t src, size_t index) {
+  return __riscv_vget_bf16m2(src, 0);
+}
+
+vbfloat16m2_t test_vget_v_bf16m8_bf16m2(vbfloat16m8_t src, size_t index) {
+  return __riscv_vget_bf16m2(src, 0);
+}
+
+vbfloat16m4_t test_vget_v_bf16m8_bf16m4(vbfloat16m8_t src, size_t index) {
+  return __riscv_vget_bf16m4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x2_bf16mf4(vbfloat16mf4x2_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x3_bf16mf4(vbfloat16mf4x3_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x4_bf16mf4(vbfloat16mf4x4_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x5_bf16mf4(vbfloat16mf4x5_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x6_bf16mf4(vbfloat16mf4x6_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x7_bf16mf4(vbfloat16mf4x7_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf4(src, 0);
+}
+
+vbfloat16mf4_t test_vget_v_bf16mf4x8_bf16mf4(vbfloat16mf4x8_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf4(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x2_bf16mf2(vbfloat16mf2x2_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf2(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x3_bf16mf2(vbfloat16mf2x3_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf2(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x4_bf16mf2(vbfloat16mf2x4_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf2(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x5_bf16mf2(vbfloat16mf2x5_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf2(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x6_bf16mf2(vbfloat16mf2x6_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf2(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x7_bf16mf2(vbfloat16mf2x7_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf2(src, 0);
+}
+
+vbfloat16mf2_t test_vget_v_bf16mf2x8_bf16mf2(vbfloat16mf2x8_t src,
+                                             size_t index) {
+  return __riscv_vget_bf16mf2(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x2_bf16m1(vbfloat16m1x2_t src, size_t index) {
+  return __riscv_vget_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x3_bf16m1(vbfloat16m1x3_t src, size_t index) {
+  return __riscv_vget_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x4_bf16m1(vbfloat16m1x4_t src, size_t index) {
+  return __riscv_vget_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x5_bf16m1(vbfloat16m1x5_t src, size_t index) {
+  return __riscv_vget_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x6_bf16m1(vbfloat16m1x6_t src, size_t index) {
+  return __riscv_vget_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x7_bf16m1(vbfloat16m1x7_t src, size_t index) {
+  return __riscv_vget_bf16m1(src, 0);
+}
+
+vbfloat16m1_t test_vget_v_bf16m1x8_bf16m1(vbfloat16m1x8_t src, size_t index) {
+  return __riscv_vget_bf16m1(src, 0);
+}
+
+vbfloat16m2_t test_vget_v_bf16m2x2_bf16m2(vbfloat16m2x2_t src, size_t index) {
+  return __riscv_vget_bf16m2(src, 0);
+}
+
+vbfloat16m2_t test_vget_v_bf16m2x3_bf16m2(vbfloat16m2x3_t src, size_t index) {
+  return __riscv_vget_bf16m2(src, 0);
+}
+
+vbfloat16m2_t test_vget_v_bf16m2x4_bf16m2(vbfloat16m2x4_t src, size_t index) {
+  return __riscv_vget_bf16m2(src, 0);
+}
+
+vbfloat16m4_t test_vget_v_bf16m4x2_bf16m4(vbfloat16m4x2_t src, size_t index) {
+  return __riscv_vget_bf16m4(src, 0);
+}
diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vle16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vle16.c
new file mode 100644
index 000000000..6d43b56a7
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-overloaded-tests/vle16.c
@@ -0,0 +1,35 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vle16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+                                      size_t vl) {
+  return __riscv_vle16(vm, rs1, vl);
+}
+
+vbfloat16mf2_t test_vle16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+                                      size_t vl) {
+  return __riscv_vle16(vm, rs1, vl);
+}
+
+vbfloat16m1_t test_vle16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+                                    size_t vl) {
+  return __riscv_vle16(vm, rs1, vl);
+}
+
+vbfloat16m2_t test_vle16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16(vm, rs1, vl);
+}
+
+vbfloat16m4_t test_vle16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16(vm, rs1, vl);
+}
+
+vbfloat16m8_t test_vle16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16(vm, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vle16ff.c b/auto-generated/bfloat16/llvm-overloaded-tests/vle16ff.c
new file mode 100644
index 000000000..65456faad
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-overloaded-tests/vle16ff.c
@@ -0,0 +1,38 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vle16ff_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1,
+                                        size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff(vm, rs1, new_vl, vl);
+}
+
+vbfloat16mf2_t test_vle16ff_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1,
+                                        size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m1_t test_vle16ff_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1,
+                                      size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m2_t test_vle16ff_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1,
+                                      size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m4_t test_vle16ff_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1,
+                                      size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff(vm, rs1, new_vl, vl);
+}
+
+vbfloat16m8_t test_vle16ff_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1,
+                                      size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff(vm, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlmul_ext_v.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlmul_ext_v.c
new file mode 100644
index 000000000..3ec3b27f3
--- /dev/null
+++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlmul_ext_v.c
@@ -0,0 +1,68 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf2_t test_vlmul_ext_v_bf16mf4_bf16mf2(vbfloat16mf4_t value) {
+  return __riscv_vlmul_ext_bf16mf2(value);
+}
+
+vbfloat16m1_t test_vlmul_ext_v_bf16mf4_bf16m1(vbfloat16mf4_t value) {
+  return __riscv_vlmul_ext_bf16m1(value);
+}
+
+vbfloat16m2_t test_vlmul_ext_v_bf16mf4_bf16m2(vbfloat16mf4_t value) {
+  return __riscv_vlmul_ext_bf16m2(value);
+}
+
+vbfloat16m4_t test_vlmul_ext_v_bf16mf4_bf16m4(vbfloat16mf4_t value) {
+  return __riscv_vlmul_ext_bf16m4(value);
+}
+
+vbfloat16m8_t test_vlmul_ext_v_bf16mf4_bf16m8(vbfloat16mf4_t value) {
+  return __riscv_vlmul_ext_bf16m8(value);
+}
+
+vbfloat16m1_t test_vlmul_ext_v_bf16mf2_bf16m1(vbfloat16mf2_t value) {
+  return __riscv_vlmul_ext_bf16m1(value);
+}
+
+vbfloat16m2_t test_vlmul_ext_v_bf16mf2_bf16m2(vbfloat16mf2_t value) {
{ + return __riscv_vlmul_ext_bf16m2(value); +} + +vbfloat16m4_t test_vlmul_ext_v_bf16mf2_bf16m4(vbfloat16mf2_t value) { + return __riscv_vlmul_ext_bf16m4(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16mf2_bf16m8(vbfloat16mf2_t value) { + return __riscv_vlmul_ext_bf16m8(value); +} + +vbfloat16m2_t test_vlmul_ext_v_bf16m1_bf16m2(vbfloat16m1_t value) { + return __riscv_vlmul_ext_bf16m2(value); +} + +vbfloat16m4_t test_vlmul_ext_v_bf16m1_bf16m4(vbfloat16m1_t value) { + return __riscv_vlmul_ext_bf16m4(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16m1_bf16m8(vbfloat16m1_t value) { + return __riscv_vlmul_ext_bf16m8(value); +} + +vbfloat16m4_t test_vlmul_ext_v_bf16m2_bf16m4(vbfloat16m2_t value) { + return __riscv_vlmul_ext_bf16m4(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16m2_bf16m8(vbfloat16m2_t value) { + return __riscv_vlmul_ext_bf16m8(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16m4_bf16m8(vbfloat16m4_t value) { + return __riscv_vlmul_ext_bf16m8(value); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlmul_trunc_v.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlmul_trunc_v.c new file mode 100644 index 000000000..cac4f8cc3 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlmul_trunc_v.c @@ -0,0 +1,68 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4_t test_vlmul_trunc_v_bf16mf2_bf16mf4(vbfloat16mf2_t value) { + return __riscv_vlmul_trunc_bf16mf4(value); +} + +vbfloat16mf4_t test_vlmul_trunc_v_bf16m1_bf16mf4(vbfloat16m1_t value) { + return __riscv_vlmul_trunc_bf16mf4(value); +} + +vbfloat16mf2_t test_vlmul_trunc_v_bf16m1_bf16mf2(vbfloat16m1_t value) { + return __riscv_vlmul_trunc_bf16mf2(value); +} + +vbfloat16mf4_t test_vlmul_trunc_v_bf16m2_bf16mf4(vbfloat16m2_t value) { + return __riscv_vlmul_trunc_bf16mf4(value); +} + +vbfloat16mf2_t test_vlmul_trunc_v_bf16m2_bf16mf2(vbfloat16m2_t value) { + return __riscv_vlmul_trunc_bf16mf2(value); +} + +vbfloat16m1_t test_vlmul_trunc_v_bf16m2_bf16m1(vbfloat16m2_t value) { + return __riscv_vlmul_trunc_bf16m1(value); +} + +vbfloat16mf4_t test_vlmul_trunc_v_bf16m4_bf16mf4(vbfloat16m4_t value) { + return __riscv_vlmul_trunc_bf16mf4(value); +} + +vbfloat16mf2_t test_vlmul_trunc_v_bf16m4_bf16mf2(vbfloat16m4_t value) { + return __riscv_vlmul_trunc_bf16mf2(value); +} + +vbfloat16m1_t test_vlmul_trunc_v_bf16m4_bf16m1(vbfloat16m4_t value) { + return __riscv_vlmul_trunc_bf16m1(value); +} + +vbfloat16m2_t test_vlmul_trunc_v_bf16m4_bf16m2(vbfloat16m4_t value) { + return __riscv_vlmul_trunc_bf16m2(value); +} + +vbfloat16mf4_t test_vlmul_trunc_v_bf16m8_bf16mf4(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_bf16mf4(value); +} + +vbfloat16mf2_t test_vlmul_trunc_v_bf16m8_bf16mf2(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_bf16mf2(value); +} + +vbfloat16m1_t test_vlmul_trunc_v_bf16m8_bf16m1(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_bf16m1(value); +} + +vbfloat16m2_t test_vlmul_trunc_v_bf16m8_bf16m2(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_bf16m2(value); +} + +vbfloat16m4_t test_vlmul_trunc_v_bf16m8_bf16m4(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_bf16m4(value); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vloxei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vloxei16.c new file mode 100644 
index 000000000..69ce719fa --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vloxei16.c @@ -0,0 +1,68 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4_t test_vloxei16_v_bf16mf4(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8(const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg2ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg2ei16.c new file mode 100644 index 000000000..44411bcae --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg2ei16.c @@ -0,0 +1,60 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); +} + +vbfloat16mf4x2_t 
test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg3ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg3ei16.c new file mode 100644 index 000000000..3ec919166 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg3ei16.c @@ -0,0 +1,50 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg4ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg4ei16.c new file mode 100644 index 000000000..00ecbd5b7 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg4ei16.c @@ -0,0 +1,50 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return 
__riscv_vloxseg4ei16(rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16(rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16(rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg5ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg5ei16.c new file mode 100644 index 000000000..955414f1a --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg5ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg6ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg6ei16.c new file mode 100644 index 000000000..e6efc771e --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg6ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16(rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16(rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return 
__riscv_vloxseg6ei16(rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg7ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg7ei16.c new file mode 100644 index 000000000..74198b68b --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg7ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg8ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg8ei16.c new file mode 100644 index 000000000..d3e550ad2 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vloxseg8ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 
*rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlse16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlse16.c new file mode 100644 index 000000000..00a13d537 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlse16.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4_t test_vlse16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg2e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg2e16.c new file mode 100644 index 000000000..b4dc477e0 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg2e16.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16(vm, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16(vm, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16(vm, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16(vm, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg2e16ff.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg2e16ff.c new file mode 100644 index 000000000..39e46bcff --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg2e16ff.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return 
__riscv_vlseg2e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg3e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg3e16.c new file mode 100644 index 000000000..bd881378c --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg3e16.c @@ -0,0 +1,28 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg3e16(vm, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg3e16(vm, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg3e16(vm, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg3e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg3e16ff.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg3e16ff.c new file mode 100644 index 000000000..cdb1e223a --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg3e16ff.c @@ -0,0 +1,28 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg4e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg4e16.c new file mode 100644 index 000000000..d61b79ca9 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg4e16.c @@ -0,0 +1,28 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma 
-disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg4e16(vm, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg4e16(vm, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg4e16(vm, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg4e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg4e16ff.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg4e16ff.c new file mode 100644 index 000000000..ec96a4047 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg4e16ff.c @@ -0,0 +1,28 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg5e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg5e16.c new file mode 100644 index 000000000..725dd8643 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg5e16.c @@ -0,0 +1,23 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg5e16(vm, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg5e16(vm, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg5e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg5e16ff.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg5e16ff.c new file mode 100644 index 000000000..d8c7f5ced --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg5e16ff.c @@ -0,0 +1,23 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + 
+vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg6e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg6e16.c new file mode 100644 index 000000000..d0c0be670 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg6e16.c @@ -0,0 +1,23 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg6e16(vm, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg6e16(vm, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg6e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg6e16ff.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg6e16ff.c new file mode 100644 index 000000000..7e34d9b5e --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg6e16ff.c @@ -0,0 +1,23 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg7e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg7e16.c new file mode 100644 index 000000000..232af118b --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg7e16.c @@ -0,0 +1,23 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg7e16(vm, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg7e16(vm, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + 
return __riscv_vlseg7e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg7e16ff.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg7e16ff.c new file mode 100644 index 000000000..849b63b6f --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg7e16ff.c @@ -0,0 +1,23 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg8e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg8e16.c new file mode 100644 index 000000000..45b44469a --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg8e16.c @@ -0,0 +1,23 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg8e16(vm, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg8e16(vm, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg8e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlseg8e16ff.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg8e16ff.c new file mode 100644 index 000000000..a54b6e0df --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlseg8e16ff.c @@ -0,0 +1,23 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg2e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg2e16.c new file mode 100644 index 000000000..9c6158897 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg2e16.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: 
%clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16(vm, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16(vm, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg3e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg3e16.c new file mode 100644 index 000000000..f592f50a7 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg3e16.c @@ -0,0 +1,28 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16(vm, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg4e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg4e16.c new file mode 100644 index 000000000..92d5e2b5a --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg4e16.c @@ -0,0 +1,28 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); +} diff --git 
a/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg5e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg5e16.c new file mode 100644 index 000000000..3433919b2 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg5e16.c @@ -0,0 +1,23 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg6e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg6e16.c new file mode 100644 index 000000000..ef3150157 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg6e16.c @@ -0,0 +1,23 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg7e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg7e16.c new file mode 100644 index 000000000..a2be41a5a --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg7e16.c @@ -0,0 +1,23 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg8e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg8e16.c new file mode 100644 index 000000000..f4f02c887 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vlsseg8e16.c @@ -0,0 +1,23 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 
-target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vluxei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vluxei16.c new file mode 100644 index 000000000..dbbb21925 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vluxei16.c @@ -0,0 +1,68 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4_t test_vluxei16_v_bf16mf4(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8(const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg2ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg2ei16.c new file mode 100644 index 000000000..66ff13901 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg2ei16.c @@ -0,0 +1,60 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S 
-passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16(rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16(rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16(rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg3ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg3ei16.c new file mode 100644 index 000000000..4ad199b43 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg3ei16.c @@ -0,0 +1,50 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); +} diff --git 
a/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg4ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg4ei16.c new file mode 100644 index 000000000..ff373110a --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg4ei16.c @@ -0,0 +1,50 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg5ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg5ei16.c new file mode 100644 index 000000000..26d30bbcd --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg5ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg6ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg6ei16.c new file mode 100644 
index 000000000..7ff324c96 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg6ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg7ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg7ei16.c new file mode 100644 index 000000000..8567e4eb0 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg7ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg8ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg8ei16.c new file mode 100644 index 000000000..c58ee1ee3 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vluxseg8ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x8_t 
test_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vmerge.c b/auto-generated/bfloat16/llvm-overloaded-tests/vmerge.c new file mode 100644 index 000000000..d0056e2b5 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vmerge.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4_t test_vmerge_vvm_bf16mf4(vbfloat16mf4_t vs2, vbfloat16mf4_t vs1, + vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); +} + +vbfloat16mf2_t test_vmerge_vvm_bf16mf2(vbfloat16mf2_t vs2, vbfloat16mf2_t vs1, + vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); +} + +vbfloat16m1_t test_vmerge_vvm_bf16m1(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); +} + +vbfloat16m2_t test_vmerge_vvm_bf16m2(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); +} + +vbfloat16m4_t test_vmerge_vvm_bf16m4(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); +} + +vbfloat16m8_t test_vmerge_vvm_bf16m8(vbfloat16m8_t vs2, vbfloat16m8_t vs1, + vbool2_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vmv.c b/auto-generated/bfloat16/llvm-overloaded-tests/vmv.c new file mode 100644 index 000000000..43e6807cf --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vmv.c @@ -0,0 +1,32 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4_t test_vmv_v_v_bf16mf4(vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); +} + +vbfloat16mf2_t test_vmv_v_v_bf16mf2(vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); +} + +vbfloat16m1_t test_vmv_v_v_bf16m1(vbfloat16m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); +} + +vbfloat16m2_t test_vmv_v_v_bf16m2(vbfloat16m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); +} + +vbfloat16m4_t test_vmv_v_v_bf16m4(vbfloat16m4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); +} + +vbfloat16m8_t 
test_vmv_v_v_bf16m8(vbfloat16m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vreinterpret.c b/auto-generated/bfloat16/llvm-overloaded-tests/vreinterpret.c new file mode 100644 index 000000000..3ed1b2791 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vreinterpret.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4_t test_vreinterpret_v_i16mf4_bf16mf4(vint16mf4_t src) { + return __riscv_vreinterpret_bf16mf4(src); +} + +vbfloat16mf2_t test_vreinterpret_v_i16mf2_bf16mf2(vint16mf2_t src) { + return __riscv_vreinterpret_bf16mf2(src); +} + +vbfloat16m1_t test_vreinterpret_v_i16m1_bf16m1(vint16m1_t src) { + return __riscv_vreinterpret_bf16m1(src); +} + +vbfloat16m2_t test_vreinterpret_v_i16m2_bf16m2(vint16m2_t src) { + return __riscv_vreinterpret_bf16m2(src); +} + +vbfloat16m4_t test_vreinterpret_v_i16m4_bf16m4(vint16m4_t src) { + return __riscv_vreinterpret_bf16m4(src); +} + +vbfloat16m8_t test_vreinterpret_v_i16m8_bf16m8(vint16m8_t src) { + return __riscv_vreinterpret_bf16m8(src); +} + +vbfloat16mf4_t test_vreinterpret_v_u16mf4_bf16mf4(vuint16mf4_t src) { + return __riscv_vreinterpret_bf16mf4(src); +} + +vbfloat16mf2_t test_vreinterpret_v_u16mf2_bf16mf2(vuint16mf2_t src) { + return __riscv_vreinterpret_bf16mf2(src); +} + +vbfloat16m1_t test_vreinterpret_v_u16m1_bf16m1(vuint16m1_t src) { + return __riscv_vreinterpret_bf16m1(src); +} + +vbfloat16m2_t test_vreinterpret_v_u16m2_bf16m2(vuint16m2_t src) { + return __riscv_vreinterpret_bf16m2(src); +} + +vbfloat16m4_t test_vreinterpret_v_u16m4_bf16m4(vuint16m4_t src) { + return __riscv_vreinterpret_bf16m4(src); +} + +vbfloat16m8_t test_vreinterpret_v_u16m8_bf16m8(vuint16m8_t src) { + return __riscv_vreinterpret_bf16m8(src); +} + +vint16mf4_t test_vreinterpret_v_bf16mf4_i16mf4(vbfloat16mf4_t src) { + return __riscv_vreinterpret_i16mf4(src); +} + +vint16mf2_t test_vreinterpret_v_bf16mf2_i16mf2(vbfloat16mf2_t src) { + return __riscv_vreinterpret_i16mf2(src); +} + +vint16m1_t test_vreinterpret_v_bf16m1_i16m1(vbfloat16m1_t src) { + return __riscv_vreinterpret_i16m1(src); +} + +vint16m2_t test_vreinterpret_v_bf16m2_i16m2(vbfloat16m2_t src) { + return __riscv_vreinterpret_i16m2(src); +} + +vint16m4_t test_vreinterpret_v_bf16m4_i16m4(vbfloat16m4_t src) { + return __riscv_vreinterpret_i16m4(src); +} + +vint16m8_t test_vreinterpret_v_bf16m8_i16m8(vbfloat16m8_t src) { + return __riscv_vreinterpret_i16m8(src); +} + +vuint16mf4_t test_vreinterpret_v_bf16mf4_u16mf4(vbfloat16mf4_t src) { + return __riscv_vreinterpret_u16mf4(src); +} + +vuint16mf2_t test_vreinterpret_v_bf16mf2_u16mf2(vbfloat16mf2_t src) { + return __riscv_vreinterpret_u16mf2(src); +} + +vuint16m1_t test_vreinterpret_v_bf16m1_u16m1(vbfloat16m1_t src) { + return __riscv_vreinterpret_u16m1(src); +} + +vuint16m2_t test_vreinterpret_v_bf16m2_u16m2(vbfloat16m2_t src) { + return __riscv_vreinterpret_u16m2(src); +} + +vuint16m4_t test_vreinterpret_v_bf16m4_u16m4(vbfloat16m4_t src) { + return __riscv_vreinterpret_u16m4(src); +} + +vuint16m8_t test_vreinterpret_v_bf16m8_u16m8(vbfloat16m8_t src) { + return __riscv_vreinterpret_u16m8(src); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vse16.c
b/auto-generated/bfloat16/llvm-overloaded-tests/vse16.c new file mode 100644 index 000000000..7458b0fbf --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vse16.c @@ -0,0 +1,62 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vse16_v_bf16mf4(__bf16 *rs1, vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); +} + +void test_vse16_v_bf16mf2(__bf16 *rs1, vbfloat16mf2_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); +} + +void test_vse16_v_bf16m1(__bf16 *rs1, vbfloat16m1_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); +} + +void test_vse16_v_bf16m2(__bf16 *rs1, vbfloat16m2_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); +} + +void test_vse16_v_bf16m4(__bf16 *rs1, vbfloat16m4_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); +} + +void test_vse16_v_bf16m8(__bf16 *rs1, vbfloat16m8_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); +} + +void test_vse16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vset.c b/auto-generated/bfloat16/llvm-overloaded-tests/vset.c new file mode 100644 index 000000000..e5de51a6d --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vset.c @@ -0,0 +1,177 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16m2_t test_vset_v_bf16m1_bf16m2(vbfloat16m2_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m4_t test_vset_v_bf16m1_bf16m4(vbfloat16m4_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m4_t test_vset_v_bf16m2_bf16m4(vbfloat16m4_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m8_t test_vset_v_bf16m1_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m8_t test_vset_v_bf16m2_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m8_t test_vset_v_bf16m4_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x2_t test_vset_v_bf16mf4_bf16mf4x2(vbfloat16mf4x2_t dest, + size_t index, + vbfloat16mf4_t value) {
return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x3_t test_vset_v_bf16mf4_bf16mf4x3(vbfloat16mf4x3_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x4_t test_vset_v_bf16mf4_bf16mf4x4(vbfloat16mf4x4_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x5_t test_vset_v_bf16mf4_bf16mf4x5(vbfloat16mf4x5_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x6_t test_vset_v_bf16mf4_bf16mf4x6(vbfloat16mf4x6_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x7_t test_vset_v_bf16mf4_bf16mf4x7(vbfloat16mf4x7_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x8_t test_vset_v_bf16mf4_bf16mf4x8(vbfloat16mf4x8_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x2_t test_vset_v_bf16mf2_bf16mf2x2(vbfloat16mf2x2_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x3_t test_vset_v_bf16mf2_bf16mf2x3(vbfloat16mf2x3_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x4_t test_vset_v_bf16mf2_bf16mf2x4(vbfloat16mf2x4_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x5_t test_vset_v_bf16mf2_bf16mf2x5(vbfloat16mf2x5_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x6_t test_vset_v_bf16mf2_bf16mf2x6(vbfloat16mf2x6_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x7_t test_vset_v_bf16mf2_bf16mf2x7(vbfloat16mf2x7_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x8_t test_vset_v_bf16mf2_bf16mf2x8(vbfloat16mf2x8_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x2_t test_vset_v_bf16m1_bf16m1x2(vbfloat16m1x2_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x3_t test_vset_v_bf16m1_bf16m1x3(vbfloat16m1x3_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x4_t test_vset_v_bf16m1_bf16m1x4(vbfloat16m1x4_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x5_t test_vset_v_bf16m1_bf16m1x5(vbfloat16m1x5_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x6_t test_vset_v_bf16m1_bf16m1x6(vbfloat16m1x6_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x7_t test_vset_v_bf16m1_bf16m1x7(vbfloat16m1x7_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x8_t test_vset_v_bf16m1_bf16m1x8(vbfloat16m1x8_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m2x2_t test_vset_v_bf16m2_bf16m2x2(vbfloat16m2x2_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m2x3_t test_vset_v_bf16m2_bf16m2x3(vbfloat16m2x3_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m2x4_t test_vset_v_bf16m2_bf16m2x4(vbfloat16m2x4_t dest, size_t index, + vbfloat16m2_t value) { + return 
__riscv_vset(dest, 0, value); +} + +vbfloat16m4x2_t test_vset_v_bf16m4_bf16m4x2(vbfloat16m4x2_t dest, size_t index, + vbfloat16m4_t value) { + return __riscv_vset(dest, 0, value); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsoxei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxei16.c new file mode 100644 index 000000000..6c3065418 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxei16.c @@ -0,0 +1,68 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsoxei16_v_bf16mf4(__bf16 *rs1, vuint16mf4_t rs2, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16mf2(__bf16 *rs1, vuint16mf2_t rs2, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m1(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m2(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m4(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m8(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg2ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg2ei16.c new file mode 100644 index 000000000..bf48ad80d --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg2ei16.c @@ -0,0 +1,60 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl); +} + +void
test_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl) { + return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl) { + return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg3ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg3ei16.c new file mode 100644 index 000000000..0c8a722fa --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg3ei16.c @@ -0,0 +1,50 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg4ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg4ei16.c new file mode 100644 index 000000000..7ce48db47 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg4ei16.c @@ -0,0
+1,50 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg5ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg5ei16.c new file mode 100644 index 000000000..b2873fc30 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg5ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl) { + return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x5_t vs3, + size_t vl) { + return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg6ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg6ei16.c new file mode 100644 index 000000000..adacd0f6d --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg6ei16.c @@ -0,0 +1,40 @@ +//
REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg7ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg7ei16.c new file mode 100644 index 000000000..70c95852e --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg7ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl) { + return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl) { + return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg8ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg8ei16.c new file mode 100644 index 000000000..43dfb6b48 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsoxseg8ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1,
vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsse16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsse16.c new file mode 100644 index 000000000..4fee05466 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsse16.c @@ -0,0 +1,68 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsse16_v_bf16mf4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16mf2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m1(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m8(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m4_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m8_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsseg2e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg2e16.c new file mode 100644 index 000000000..4687d6301 --- /dev/null +++
b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg2e16.c @@ -0,0 +1,53 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsseg2e16_v_bf16mf4x2(__bf16 *rs1, vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16mf2x2(__bf16 *rs1, vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m1x2(__bf16 *rs1, vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m2x2(__bf16 *rs1, vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m4x2(__bf16 *rs1, vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x2_t vs3, + size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x2_t vs3, + size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vbfloat16m4x2_t vs3, + size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsseg3e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg3e16.c new file mode 100644 index 000000000..4e191bdfa --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg3e16.c @@ -0,0 +1,44 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsseg3e16_v_bf16mf4x3(__bf16 *rs1, vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16mf2x3(__bf16 *rs1, vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16m1x3(__bf16 *rs1, vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16m2x3(__bf16 *rs1, vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x3_t vs3, + size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x3_t vs3, + size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsseg4e16.c
b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg4e16.c new file mode 100644 index 000000000..82d027fa7 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg4e16.c @@ -0,0 +1,44 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsseg4e16_v_bf16mf4x4(__bf16 *rs1, vbfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16mf2x4(__bf16 *rs1, vbfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16m1x4(__bf16 *rs1, vbfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16m2x4(__bf16 *rs1, vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x4_t vs3, + size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x4_t vs3, + size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsseg5e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg5e16.c new file mode 100644 index 000000000..b1bad6227 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg5e16.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsseg5e16_v_bf16mf4x5(__bf16 *rs1, vbfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); +} + +void test_vsseg5e16_v_bf16mf2x5(__bf16 *rs1, vbfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); +} + +void test_vsseg5e16_v_bf16m1x5(__bf16 *rs1, vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); +} + +void test_vsseg5e16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); +} + +void test_vsseg5e16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); +} + +void test_vsseg5e16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x5_t vs3, + size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsseg6e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg6e16.c new file mode 100644 index 000000000..1cbb02f40 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg6e16.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:
FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsseg6e16_v_bf16mf4x6(__bf16 *rs1, vbfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); +} + +void test_vsseg6e16_v_bf16mf2x6(__bf16 *rs1, vbfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); +} + +void test_vsseg6e16_v_bf16m1x6(__bf16 *rs1, vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); +} + +void test_vsseg6e16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); +} + +void test_vsseg6e16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); +} + +void test_vsseg6e16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x6_t vs3, + size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsseg7e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg7e16.c new file mode 100644 index 000000000..d1bba335b --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg7e16.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsseg7e16_v_bf16mf4x7(__bf16 *rs1, vbfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); +} + +void test_vsseg7e16_v_bf16mf2x7(__bf16 *rs1, vbfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); +} + +void test_vsseg7e16_v_bf16m1x7(__bf16 *rs1, vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); +} + +void test_vsseg7e16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); +} + +void test_vsseg7e16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); +} + +void test_vsseg7e16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x7_t vs3, + size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsseg8e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg8e16.c new file mode 100644 index 000000000..bda6e7050 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsseg8e16.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsseg8e16_v_bf16mf4x8(__bf16 *rs1, vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); +} + +void test_vsseg8e16_v_bf16mf2x8(__bf16 *rs1, vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); +} + +void test_vsseg8e16_v_bf16m1x8(__bf16 *rs1, vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); +} + +void test_vsseg8e16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); +} + +void test_vsseg8e16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); +} +
+void test_vsseg8e16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x8_t vs3, + size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vssseg2e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg2e16.c new file mode 100644 index 000000000..d81dc61f1 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg2e16.c @@ -0,0 +1,58 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vssseg2e16_v_bf16mf4x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16mf2x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16m1x2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x2_t vs3, + size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16m2x2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x2_t vs3, + size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16m4x2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4x2_t vs3, + size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vssseg3e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg3e16.c new file mode 100644 index 000000000..b3ac44719 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg3e16.c @@ -0,0 +1,48 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vssseg3e16_v_bf16mf4x3(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); +} + +void test_vssseg3e16_v_bf16mf2x3(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); +} + +void test_vssseg3e16_v_bf16m1x3(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x3_t vs3, + size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); +} + +void test_vssseg3e16_v_bf16m2x3(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x3_t vs3, + size_t vl) { + return __riscv_vssseg3e16(rs1, rs2, vs3, vl); +} + +void
test_vssseg3e16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg3e16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg3e16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg3e16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vssseg4e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg4e16.c new file mode 100644 index 000000000..25960d3de --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg4e16.c @@ -0,0 +1,48 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vssseg4e16_v_bf16mf4x4(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); +} + +void test_vssseg4e16_v_bf16mf2x4(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); +} + +void test_vssseg4e16_v_bf16m1x4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x4_t vs3, + size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); +} + +void test_vssseg4e16_v_bf16m2x4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x4_t vs3, + size_t vl) { + return __riscv_vssseg4e16(rs1, rs2, vs3, vl); +} + +void test_vssseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vssseg5e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg5e16.c new file mode 100644 index 000000000..b4104cb7d --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg5e16.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vssseg5e16_v_bf16mf4x5(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(rs1, rs2, vs3, vl); +} + +void test_vssseg5e16_v_bf16mf2x5(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(rs1, rs2, vs3, vl); +} + +void test_vssseg5e16_v_bf16m1x5(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x5_t vs3, + size_t vl) { + return
__riscv_vssseg5e16(rs1, rs2, vs3, vl); +} + +void test_vssseg5e16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg5e16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg5e16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vssseg6e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg6e16.c new file mode 100644 index 000000000..06f50a29b --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg6e16.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vssseg6e16_v_bf16mf4x6(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(rs1, rs2, vs3, vl); +} + +void test_vssseg6e16_v_bf16mf2x6(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(rs1, rs2, vs3, vl); +} + +void test_vssseg6e16_v_bf16m1x6(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x6_t vs3, + size_t vl) { + return __riscv_vssseg6e16(rs1, rs2, vs3, vl); +} + +void test_vssseg6e16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg6e16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg6e16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vssseg7e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg7e16.c new file mode 100644 index 000000000..4078be92a --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg7e16.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vssseg7e16_v_bf16mf4x7(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(rs1, rs2, vs3, vl); +} + +void test_vssseg7e16_v_bf16mf2x7(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(rs1, rs2, vs3, vl); +} + +void test_vssseg7e16_v_bf16m1x7(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x7_t vs3, + size_t vl) { + return __riscv_vssseg7e16(rs1, rs2, vs3, vl); +} + +void test_vssseg7e16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg7e16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg7e16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1,
ptrdiff_t rs2, + vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vssseg8e16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg8e16.c new file mode 100644 index 000000000..6e3d91f21 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vssseg8e16.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vssseg8e16_v_bf16mf4x8(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); +} + +void test_vssseg8e16_v_bf16mf2x8(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); +} + +void test_vssseg8e16_v_bf16m1x8(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x8_t vs3, + size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); +} + +void test_vssseg8e16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg8e16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg8e16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsuxei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxei16.c new file mode 100644 index 000000000..49414ce68 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxei16.c @@ -0,0 +1,68 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsuxei16_v_bf16mf4(__bf16 *rs1, vuint16mf4_t rs2, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16mf2(__bf16 *rs1, vuint16mf2_t rs2, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m1(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m2(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m4(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m8(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl) {
return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg2ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg2ei16.c new file mode 100644 index 000000000..f069eee67 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg2ei16.c @@ -0,0 +1,60 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl) { + return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl) { + return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg3ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg3ei16.c new file mode 100644 index 000000000..8e2ff49bf --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg3ei16.c @@ -0,0 +1,50 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); +} +
+void test_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg4ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg4ei16.c new file mode 100644 index 000000000..92fb63fce --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg4ei16.c @@ -0,0 +1,50 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg5ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg5ei16.c new file mode 100644 index 000000000..2080f8517 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg5ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature
+zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl) { + return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x5_t vs3, + size_t vl) { + return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg6ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg6ei16.c new file mode 100644 index 000000000..a47d53ef1 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg6ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl) { + return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl) { + return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg7ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg7ei16.c new file mode 100644 index 000000000..d1ea14ab4 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg7ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +void test_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg7ei16_v_bf16mf2x7(__bf16 
*rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg8ei16.c b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg8ei16.c new file mode 100644 index 000000000..c0a23fde3 --- /dev/null +++ b/auto-generated/bfloat16/llvm-overloaded-tests/vsuxseg8ei16.c @@ -0,0 +1,40 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +void test_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl) { + return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl) { + return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vfncvtbf16.c b/auto-generated/bfloat16/overloaded-api-testing/vfncvtbf16.c new file mode 100644 index 000000000..d402fd187 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vfncvtbf16.c @@ -0,0 +1,92 @@ +#include +#include + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f(vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f(vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f(vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f(vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f(vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f(vm, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) 
{ + return __riscv_vfncvtbf16_f(vm, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f(vm, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f(vm, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f(vm, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f(vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf4_t +test_vfncvtbf16_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_m(vbool32_t vm, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f(vm, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vfwcvtbf16.c b/auto-generated/bfloat16/overloaded-api-testing/vfwcvtbf16.c new file mode 100644 index 000000000..9e0306536 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vfwcvtbf16.c @@ -0,0 +1,47 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f(vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f(vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f(vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f(vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f(vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f(vm, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f(vm, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f(vm, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f(vm, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + 
return __riscv_vfwcvtbf16_f(vm, vs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vfwmaccbf16.c b/auto-generated/bfloat16/overloaded-api-testing/vfwmaccbf16.c new file mode 100644 index 000000000..19c317e42 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vfwmaccbf16.c @@ -0,0 +1,223 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2(vfloat32mf2_t vd, vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl) { + 
return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, 
vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vget.c b/auto-generated/bfloat16/overloaded-api-testing/vget.c new file mode 100644 index 000000000..f249b3faf --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vget.c @@ -0,0 +1,140 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16m1_t test_vget_v_bf16m2_bf16m1(vbfloat16m2_t src, size_t index) { + return __riscv_vget_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m4_bf16m1(vbfloat16m4_t src, size_t index) { + return __riscv_vget_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m8_bf16m1(vbfloat16m8_t src, size_t index) { + return __riscv_vget_bf16m1(src, 0); +} + +vbfloat16m2_t test_vget_v_bf16m4_bf16m2(vbfloat16m4_t src, size_t index) { + return __riscv_vget_bf16m2(src, 0); +} + +vbfloat16m2_t test_vget_v_bf16m8_bf16m2(vbfloat16m8_t src, size_t index) { + return __riscv_vget_bf16m2(src, 0); +} + +vbfloat16m4_t test_vget_v_bf16m8_bf16m4(vbfloat16m8_t src, size_t index) { + return __riscv_vget_bf16m4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x2_bf16mf4(vbfloat16mf4x2_t src, + size_t index) { + return __riscv_vget_bf16mf4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x3_bf16mf4(vbfloat16mf4x3_t src, + size_t index) { + return __riscv_vget_bf16mf4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x4_bf16mf4(vbfloat16mf4x4_t src, + size_t index) { + return __riscv_vget_bf16mf4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x5_bf16mf4(vbfloat16mf4x5_t src, + size_t index) { + return __riscv_vget_bf16mf4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x6_bf16mf4(vbfloat16mf4x6_t src, + size_t index) { + return __riscv_vget_bf16mf4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x7_bf16mf4(vbfloat16mf4x7_t src, + size_t index) { + return __riscv_vget_bf16mf4(src, 0); +} + +vbfloat16mf4_t test_vget_v_bf16mf4x8_bf16mf4(vbfloat16mf4x8_t src, + size_t index) { + return __riscv_vget_bf16mf4(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x2_bf16mf2(vbfloat16mf2x2_t src, + size_t index) { + return __riscv_vget_bf16mf2(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x3_bf16mf2(vbfloat16mf2x3_t src, + size_t index) { + return __riscv_vget_bf16mf2(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x4_bf16mf2(vbfloat16mf2x4_t src, + size_t index) { + return __riscv_vget_bf16mf2(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x5_bf16mf2(vbfloat16mf2x5_t src, + size_t index) { + return __riscv_vget_bf16mf2(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x6_bf16mf2(vbfloat16mf2x6_t src, + size_t index) { + return __riscv_vget_bf16mf2(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x7_bf16mf2(vbfloat16mf2x7_t src, + size_t index) { + return __riscv_vget_bf16mf2(src, 0); +} + +vbfloat16mf2_t test_vget_v_bf16mf2x8_bf16mf2(vbfloat16mf2x8_t src, + size_t index) { + return __riscv_vget_bf16mf2(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x2_bf16m1(vbfloat16m1x2_t src, size_t index) { + return __riscv_vget_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x3_bf16m1(vbfloat16m1x3_t src, size_t index) { + return 
__riscv_vget_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x4_bf16m1(vbfloat16m1x4_t src, size_t index) { + return __riscv_vget_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x5_bf16m1(vbfloat16m1x5_t src, size_t index) { + return __riscv_vget_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x6_bf16m1(vbfloat16m1x6_t src, size_t index) { + return __riscv_vget_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x7_bf16m1(vbfloat16m1x7_t src, size_t index) { + return __riscv_vget_bf16m1(src, 0); +} + +vbfloat16m1_t test_vget_v_bf16m1x8_bf16m1(vbfloat16m1x8_t src, size_t index) { + return __riscv_vget_bf16m1(src, 0); +} + +vbfloat16m2_t test_vget_v_bf16m2x2_bf16m2(vbfloat16m2x2_t src, size_t index) { + return __riscv_vget_bf16m2(src, 0); +} + +vbfloat16m2_t test_vget_v_bf16m2x3_bf16m2(vbfloat16m2x3_t src, size_t index) { + return __riscv_vget_bf16m2(src, 0); +} + +vbfloat16m2_t test_vget_v_bf16m2x4_bf16m2(vbfloat16m2x4_t src, size_t index) { + return __riscv_vget_bf16m2(src, 0); +} + +vbfloat16m4_t test_vget_v_bf16m4x2_bf16m4(vbfloat16m4x2_t src, size_t index) { + return __riscv_vget_bf16m4(src, 0); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vle16.c b/auto-generated/bfloat16/overloaded-api-testing/vle16.c new file mode 100644 index 000000000..2e0ef5c7c --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vle16.c @@ -0,0 +1,29 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vle16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16(vm, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16(vm, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16(vm, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, size_t vl) { + return __riscv_vle16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vle16ff.c b/auto-generated/bfloat16/overloaded-api-testing/vle16ff.c new file mode 100644 index 000000000..34da33989 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vle16ff.c @@ -0,0 +1,32 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlmul_ext_v.c b/auto-generated/bfloat16/overloaded-api-testing/vlmul_ext_v.c new file mode 
100644 index 000000000..b26e1401c --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlmul_ext_v.c @@ -0,0 +1,62 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf2_t test_vlmul_ext_v_bf16mf4_bf16mf2(vbfloat16mf4_t value) { + return __riscv_vlmul_ext_bf16mf2(value); +} + +vbfloat16m1_t test_vlmul_ext_v_bf16mf4_bf16m1(vbfloat16mf4_t value) { + return __riscv_vlmul_ext_bf16m1(value); +} + +vbfloat16m2_t test_vlmul_ext_v_bf16mf4_bf16m2(vbfloat16mf4_t value) { + return __riscv_vlmul_ext_bf16m2(value); +} + +vbfloat16m4_t test_vlmul_ext_v_bf16mf4_bf16m4(vbfloat16mf4_t value) { + return __riscv_vlmul_ext_bf16m4(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16mf4_bf16m8(vbfloat16mf4_t value) { + return __riscv_vlmul_ext_bf16m8(value); +} + +vbfloat16m1_t test_vlmul_ext_v_bf16mf2_bf16m1(vbfloat16mf2_t value) { + return __riscv_vlmul_ext_bf16m1(value); +} + +vbfloat16m2_t test_vlmul_ext_v_bf16mf2_bf16m2(vbfloat16mf2_t value) { + return __riscv_vlmul_ext_bf16m2(value); +} + +vbfloat16m4_t test_vlmul_ext_v_bf16mf2_bf16m4(vbfloat16mf2_t value) { + return __riscv_vlmul_ext_bf16m4(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16mf2_bf16m8(vbfloat16mf2_t value) { + return __riscv_vlmul_ext_bf16m8(value); +} + +vbfloat16m2_t test_vlmul_ext_v_bf16m1_bf16m2(vbfloat16m1_t value) { + return __riscv_vlmul_ext_bf16m2(value); +} + +vbfloat16m4_t test_vlmul_ext_v_bf16m1_bf16m4(vbfloat16m1_t value) { + return __riscv_vlmul_ext_bf16m4(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16m1_bf16m8(vbfloat16m1_t value) { + return __riscv_vlmul_ext_bf16m8(value); +} + +vbfloat16m4_t test_vlmul_ext_v_bf16m2_bf16m4(vbfloat16m2_t value) { + return __riscv_vlmul_ext_bf16m4(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16m2_bf16m8(vbfloat16m2_t value) { + return __riscv_vlmul_ext_bf16m8(value); +} + +vbfloat16m8_t test_vlmul_ext_v_bf16m4_bf16m8(vbfloat16m4_t value) { + return __riscv_vlmul_ext_bf16m8(value); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlmul_trunc_v.c b/auto-generated/bfloat16/overloaded-api-testing/vlmul_trunc_v.c new file mode 100644 index 000000000..96b46c1e8 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlmul_trunc_v.c @@ -0,0 +1,62 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vlmul_trunc_v_bf16mf2_bf16mf4(vbfloat16mf2_t value) { + return __riscv_vlmul_trunc_bf16mf4(value); +} + +vbfloat16mf4_t test_vlmul_trunc_v_bf16m1_bf16mf4(vbfloat16m1_t value) { + return __riscv_vlmul_trunc_bf16mf4(value); +} + +vbfloat16mf2_t test_vlmul_trunc_v_bf16m1_bf16mf2(vbfloat16m1_t value) { + return __riscv_vlmul_trunc_bf16mf2(value); +} + +vbfloat16mf4_t test_vlmul_trunc_v_bf16m2_bf16mf4(vbfloat16m2_t value) { + return __riscv_vlmul_trunc_bf16mf4(value); +} + +vbfloat16mf2_t test_vlmul_trunc_v_bf16m2_bf16mf2(vbfloat16m2_t value) { + return __riscv_vlmul_trunc_bf16mf2(value); +} + +vbfloat16m1_t test_vlmul_trunc_v_bf16m2_bf16m1(vbfloat16m2_t value) { + return __riscv_vlmul_trunc_bf16m1(value); +} + +vbfloat16mf4_t test_vlmul_trunc_v_bf16m4_bf16mf4(vbfloat16m4_t value) { + return __riscv_vlmul_trunc_bf16mf4(value); +} + +vbfloat16mf2_t test_vlmul_trunc_v_bf16m4_bf16mf2(vbfloat16m4_t value) { + return __riscv_vlmul_trunc_bf16mf2(value); +} + +vbfloat16m1_t test_vlmul_trunc_v_bf16m4_bf16m1(vbfloat16m4_t value) { + return __riscv_vlmul_trunc_bf16m1(value); +} + +vbfloat16m2_t test_vlmul_trunc_v_bf16m4_bf16m2(vbfloat16m4_t value) { + return __riscv_vlmul_trunc_bf16m2(value); +} + +vbfloat16mf4_t test_vlmul_trunc_v_bf16m8_bf16mf4(vbfloat16m8_t value) { + return 
__riscv_vlmul_trunc_bf16mf4(value); +} + +vbfloat16mf2_t test_vlmul_trunc_v_bf16m8_bf16mf2(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_bf16mf2(value); +} + +vbfloat16m1_t test_vlmul_trunc_v_bf16m8_bf16m1(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_bf16m1(value); +} + +vbfloat16m2_t test_vlmul_trunc_v_bf16m8_bf16m2(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_bf16m2(value); +} + +vbfloat16m4_t test_vlmul_trunc_v_bf16m8_bf16m4(vbfloat16m8_t value) { + return __riscv_vlmul_trunc_bf16m4(value); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vloxei16.c b/auto-generated/bfloat16/overloaded-api-testing/vloxei16.c new file mode 100644 index 000000000..a32bd147c --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vloxei16.c @@ -0,0 +1,62 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vloxei16_v_bf16mf4(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8(const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16(rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vloxseg2ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vloxseg2ei16.c new file mode 100644 index 000000000..06999c6b0 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vloxseg2ei16.c @@ -0,0 +1,54 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, + 
size_t vl) { + return __riscv_vloxseg2ei16(rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vloxseg3ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vloxseg3ei16.c new file mode 100644 index 000000000..1534ddfde --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vloxseg3ei16.c @@ -0,0 +1,44 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16(rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vloxseg4ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vloxseg4ei16.c new file mode 100644 index 000000000..25543e43b --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vloxseg4ei16.c @@ -0,0 +1,44 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16(rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16(rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); +} + 
+vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vloxseg5ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vloxseg5ei16.c new file mode 100644 index 000000000..cb842f8d3 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vloxseg5ei16.c @@ -0,0 +1,34 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16(rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vloxseg6ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vloxseg6ei16.c new file mode 100644 index 000000000..866ca7f8c --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vloxseg6ei16.c @@ -0,0 +1,34 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16(rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16(rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16(rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vloxseg7ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vloxseg7ei16.c new file mode 100644 index 000000000..788934129 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vloxseg7ei16.c @@ -0,0 +1,34 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7(const __bf16 
*rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16(rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vloxseg8ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vloxseg8ei16.c new file mode 100644 index 000000000..001837f44 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vloxseg8ei16.c @@ -0,0 +1,34 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16(rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlse16.c b/auto-generated/bfloat16/overloaded-api-testing/vlse16.c new file mode 100644 index 000000000..120ce69e7 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlse16.c @@ -0,0 +1,32 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vlse16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg2e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg2e16.c new file mode 100644 index 000000000..4d3292d0a --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg2e16.c @@ -0,0 +1,27 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_m(vbool64_t vm, 
const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16(vm, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16(vm, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16(vm, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16(vm, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg2e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg2e16ff.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg2e16ff.c new file mode 100644 index 000000000..53ba3ba68 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg2e16ff.c @@ -0,0 +1,27 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg3e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg3e16.c new file mode 100644 index 000000000..a3cf2b4de --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg3e16.c @@ -0,0 +1,22 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg3e16(vm, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg3e16(vm, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg3e16(vm, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg3e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg3e16ff.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg3e16ff.c new file mode 100644 index 000000000..c708c12bf --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg3e16ff.c @@ -0,0 +1,22 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + 
return __riscv_vlseg3e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg4e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg4e16.c new file mode 100644 index 000000000..4d0994c2a --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg4e16.c @@ -0,0 +1,22 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg4e16(vm, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg4e16(vm, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg4e16(vm, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg4e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg4e16ff.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg4e16ff.c new file mode 100644 index 000000000..bdfb7c1ed --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg4e16ff.c @@ -0,0 +1,22 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg5e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg5e16.c new file mode 100644 index 000000000..0a8e634c5 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg5e16.c @@ -0,0 +1,17 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg5e16(vm, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg5e16(vm, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg5e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg5e16ff.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg5e16ff.c new file mode 100644 index 000000000..6e8e9d1f0 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg5e16ff.c @@ -0,0 +1,17 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg6e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg6e16.c new file mode 
100644 index 000000000..9365aeb8d --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg6e16.c @@ -0,0 +1,17 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg6e16(vm, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg6e16(vm, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg6e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg6e16ff.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg6e16ff.c new file mode 100644 index 000000000..8376dd159 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg6e16ff.c @@ -0,0 +1,17 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg7e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg7e16.c new file mode 100644 index 000000000..01c7e48d8 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg7e16.c @@ -0,0 +1,17 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg7e16(vm, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg7e16(vm, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg7e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg7e16ff.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg7e16ff.c new file mode 100644 index 000000000..8db5ec0e2 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg7e16ff.c @@ -0,0 +1,17 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg8e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg8e16.c new file mode 100644 index 000000000..cc5804338 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg8e16.c @@ -0,0 +1,17 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg8e16(vm, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, + size_t vl) { + return __riscv_vlseg8e16(vm, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_m(vbool16_t vm, const 
__bf16 *rs1, + size_t vl) { + return __riscv_vlseg8e16(vm, rs1, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlseg8e16ff.c b/auto-generated/bfloat16/overloaded-api-testing/vlseg8e16ff.c new file mode 100644 index 000000000..4011e172e --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlseg8e16ff.c @@ -0,0 +1,17 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff(vm, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlsseg2e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlsseg2e16.c new file mode 100644 index 000000000..53c30e9e0 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlsseg2e16.c @@ -0,0 +1,27 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16(vm, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16(vm, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlsseg3e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlsseg3e16.c new file mode 100644 index 000000000..b3f3213fa --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlsseg3e16.c @@ -0,0 +1,22 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16(vm, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlsseg4e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlsseg4e16.c new file mode 100644 index 000000000..b24623f0a --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlsseg4e16.c @@ -0,0 +1,22 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, 
vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlsseg5e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlsseg5e16.c new file mode 100644 index 000000000..98e718b17 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlsseg5e16.c @@ -0,0 +1,17 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlsseg6e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlsseg6e16.c new file mode 100644 index 000000000..9b6a0f74a --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlsseg6e16.c @@ -0,0 +1,17 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlsseg7e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlsseg7e16.c new file mode 100644 index 000000000..4c25ff34d --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlsseg7e16.c @@ -0,0 +1,17 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vlsseg8e16.c b/auto-generated/bfloat16/overloaded-api-testing/vlsseg8e16.c new file mode 100644 index 000000000..dde6175ca --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vlsseg8e16.c @@ -0,0 +1,17 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_m(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_m(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16(vm, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vluxei16.c 
b/auto-generated/bfloat16/overloaded-api-testing/vluxei16.c new file mode 100644 index 000000000..934f2d147 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vluxei16.c @@ -0,0 +1,62 @@ +#include +#include + +vbfloat16mf4_t test_vluxei16_v_bf16mf4(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8(const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16(rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_m(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_m(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_m(vbool2_t vm, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vluxseg2ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vluxseg2ei16.c new file mode 100644 index 000000000..73f98c757 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vluxseg2ei16.c @@ -0,0 +1,54 @@ +#include +#include + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16(rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16(rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16(rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16m2x2_t 
test_vluxseg2ei16_v_bf16m2x2_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(vm, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_m(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vluxseg3ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vluxseg3ei16.c new file mode 100644 index 000000000..a63c93a80 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vluxseg3ei16.c @@ -0,0 +1,44 @@ +#include +#include + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16(rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vluxseg4ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vluxseg4ei16.c new file mode 100644 index 000000000..77ad1b7a3 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vluxseg4ei16.c @@ -0,0 +1,44 @@ +#include +#include + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16(rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_m(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vluxseg5ei16.c 
b/auto-generated/bfloat16/overloaded-api-testing/vluxseg5ei16.c new file mode 100644 index 000000000..e125c0b5a --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vluxseg5ei16.c @@ -0,0 +1,34 @@ +#include +#include + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16(rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vluxseg6ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vluxseg6ei16.c new file mode 100644 index 000000000..570414da9 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vluxseg6ei16.c @@ -0,0 +1,34 @@ +#include +#include + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16(rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vluxseg7ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vluxseg7ei16.c new file mode 100644 index 000000000..ecf6bb4ee --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vluxseg7ei16.c @@ -0,0 +1,34 @@ +#include +#include + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16(rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); +} + 
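+// Masked (_m) overloads take the mask as the leading operand; the vboolN_t
+// type fixes the SEW/LMUL ratio (N = 16/LMUL for bf16), so vbool16_t below
+// pairs with the m1 tuple type.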
+vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vluxseg8ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vluxseg8ei16.c new file mode 100644 index 000000000..bf428cc23 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vluxseg8ei16.c @@ -0,0 +1,34 @@ +#include +#include + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8(const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8(const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16(rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_m(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16(vm, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vmerge.c b/auto-generated/bfloat16/overloaded-api-testing/vmerge.c new file mode 100644 index 000000000..6f5617436 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vmerge.c @@ -0,0 +1,32 @@ +#include +#include + +vbfloat16mf4_t test_vmerge_vvm_bf16mf4(vbfloat16mf4_t vs2, vbfloat16mf4_t vs1, + vbool64_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); +} + +vbfloat16mf2_t test_vmerge_vvm_bf16mf2(vbfloat16mf2_t vs2, vbfloat16mf2_t vs1, + vbool32_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); +} + +vbfloat16m1_t test_vmerge_vvm_bf16m1(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + vbool16_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); +} + +vbfloat16m2_t test_vmerge_vvm_bf16m2(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + vbool8_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); +} + +vbfloat16m4_t test_vmerge_vvm_bf16m4(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + vbool4_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); +} + +vbfloat16m8_t test_vmerge_vvm_bf16m8(vbfloat16m8_t vs2, vbfloat16m8_t vs1, + vbool2_t v0, size_t vl) { + return __riscv_vmerge(vs2, vs1, v0, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vmv.c b/auto-generated/bfloat16/overloaded-api-testing/vmv.c new file mode 100644 index 000000000..0c227a944 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vmv.c @@ -0,0 +1,26 @@ +#include +#include + +vbfloat16mf4_t test_vmv_v_v_bf16mf4(vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); +} + +vbfloat16mf2_t test_vmv_v_v_bf16mf2(vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); +} + +vbfloat16m1_t test_vmv_v_v_bf16m1(vbfloat16m1_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); +} + +vbfloat16m2_t test_vmv_v_v_bf16m2(vbfloat16m2_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); +} + +vbfloat16m4_t test_vmv_v_v_bf16m4(vbfloat16m4_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); +} + +vbfloat16m8_t test_vmv_v_v_bf16m8(vbfloat16m8_t vs1, size_t vl) { + return __riscv_vmv_v(vs1, vl); +} 
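For context, a minimal sketch of how the overloaded bfloat16 intrinsics exercised by these tests compose in user code: a gather through a byte-offset table followed by a unit-stride store. It assumes a toolchain where the bfloat16 vector types are enabled (e.g. an -march string containing zvfbfmin); bf16_permute and its parameter names are illustrative, not part of this patch.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative helper (hypothetical, not part of this patch): permute n
   bf16 elements through a table of byte offsets, using the overloaded
   gather/store intrinsics tested above. */
void bf16_permute(__bf16 *dst, const __bf16 *src, const uint16_t *byte_off,
                  size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);
    /* Indexed loads address in bytes, so the table holds 2 * element_index. */
    vuint16m1_t off = __riscv_vle16_v_u16m1(byte_off + i, vl);
    vbfloat16m1_t v = __riscv_vluxei16(src, off, vl); /* resolves to bf16m1 */
    __riscv_vse16(dst + i, v, vl); /* overloaded unit-stride store */
    i += vl;
  }
}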
diff --git a/auto-generated/bfloat16/overloaded-api-testing/vreinterpret.c b/auto-generated/bfloat16/overloaded-api-testing/vreinterpret.c new file mode 100644 index 000000000..457fecd65 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vreinterpret.c @@ -0,0 +1,98 @@ +#include +#include + +vbfloat16mf4_t test_vreinterpret_v_i16mf4_bf16mf4(vint16mf4_t src) { + return __riscv_vreinterpret_bf16mf4(src); +} + +vbfloat16mf2_t test_vreinterpret_v_i16mf2_bf16mf2(vint16mf2_t src) { + return __riscv_vreinterpret_bf16mf2(src); +} + +vbfloat16m1_t test_vreinterpret_v_i16m1_bf16m1(vint16m1_t src) { + return __riscv_vreinterpret_bf16m1(src); +} + +vbfloat16m2_t test_vreinterpret_v_i16m2_bf16m2(vint16m2_t src) { + return __riscv_vreinterpret_bf16m2(src); +} + +vbfloat16m4_t test_vreinterpret_v_i16m4_bf16m4(vint16m4_t src) { + return __riscv_vreinterpret_bf16m4(src); +} + +vbfloat16m8_t test_vreinterpret_v_i16m8_bf16m8(vint16m8_t src) { + return __riscv_vreinterpret_bf16m8(src); +} + +vbfloat16mf4_t test_vreinterpret_v_u16mf4_bf16mf4(vuint16mf4_t src) { + return __riscv_vreinterpret_bf16mf4(src); +} + +vbfloat16mf2_t test_vreinterpret_v_u16mf2_bf16mf2(vuint16mf2_t src) { + return __riscv_vreinterpret_bf16mf2(src); +} + +vbfloat16m1_t test_vreinterpret_v_u16m1_bf16m1(vuint16m1_t src) { + return __riscv_vreinterpret_bf16m1(src); +} + +vbfloat16m2_t test_vreinterpret_v_u16m2_bf16m2(vuint16m2_t src) { + return __riscv_vreinterpret_bf16m2(src); +} + +vbfloat16m4_t test_vreinterpret_v_u16m4_bf16m4(vuint16m4_t src) { + return __riscv_vreinterpret_bf16m4(src); +} + +vbfloat16m8_t test_vreinterpret_v_u16m8_bf16m8(vuint16m8_t src) { + return __riscv_vreinterpret_bf16m8(src); +} + +vint16mf4_t test_vreinterpret_v_bf16mf4_i16mf4(vbfloat16mf4_t src) { + return __riscv_vreinterpret_i16mf4(src); +} + +vint16mf2_t test_vreinterpret_v_bf16mf2_i16mf2(vbfloat16mf2_t src) { + return __riscv_vreinterpret_i16mf2(src); +} + +vint16m1_t test_vreinterpret_v_bf16m1_i16m1(vbfloat16m1_t src) { + return __riscv_vreinterpret_i16m1(src); +} + +vint16m2_t test_vreinterpret_v_bf16m2_i16m2(vbfloat16m2_t src) { + return __riscv_vreinterpret_i16m2(src); +} + +vint16m4_t test_vreinterpret_v_bf16m4_i16m4(vbfloat16m4_t src) { + return __riscv_vreinterpret_i16m4(src); +} + +vint16m8_t test_vreinterpret_v_bf16m8_i16m8(vbfloat16m8_t src) { + return __riscv_vreinterpret_i16m8(src); +} + +vuint16mf4_t test_vreinterpret_v_bf16mf4_u16mf4(vbfloat16mf4_t src) { + return __riscv_vreinterpret_u16mf4(src); +} + +vuint16mf2_t test_vreinterpret_v_bf16mf2_u16mf2(vbfloat16mf2_t src) { + return __riscv_vreinterpret_u16mf2(src); +} + +vuint16m1_t test_vreinterpret_v_bf16m1_u16m1(vbfloat16m1_t src) { + return __riscv_vreinterpret_u16m1(src); +} + +vuint16m2_t test_vreinterpret_v_bf16m2_u16m2(vbfloat16m2_t src) { + return __riscv_vreinterpret_u16m2(src); +} + +vuint16m4_t test_vreinterpret_v_bf16m4_u16m4(vbfloat16m4_t src) { + return __riscv_vreinterpret_u16m4(src); +} + +vuint16m8_t test_vreinterpret_v_bf16m8_u16m8(vbfloat16m8_t src) { + return __riscv_vreinterpret_u16m8(src); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vse16.c b/auto-generated/bfloat16/overloaded-api-testing/vse16.c new file mode 100644 index 000000000..923f74fd4 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vse16.c @@ -0,0 +1,56 @@ +#include +#include + +void test_vse16_v_bf16mf4(__bf16 *rs1, vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); +} + +void test_vse16_v_bf16mf2(__bf16 *rs1, vbfloat16mf2_t vs3, size_t 
vl) { + return __riscv_vse16(rs1, vs3, vl); +} + +void test_vse16_v_bf16m1(__bf16 *rs1, vbfloat16m1_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); +} + +void test_vse16_v_bf16m2(__bf16 *rs1, vbfloat16m2_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); +} + +void test_vse16_v_bf16m4(__bf16 *rs1, vbfloat16m4_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); +} + +void test_vse16_v_bf16m8(__bf16 *rs1, vbfloat16m8_t vs3, size_t vl) { + return __riscv_vse16(rs1, vs3, vl); +} + +void test_vse16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); +} + +void test_vse16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vse16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vset.c b/auto-generated/bfloat16/overloaded-api-testing/vset.c new file mode 100644 index 000000000..93fe47cd1 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vset.c @@ -0,0 +1,171 @@ +#include +#include + +vbfloat16m2_t test_vset_v_bf16m1_bf16m2(vbfloat16m2_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m4_t test_vset_v_bf16m1_bf16m4(vbfloat16m4_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m4_t test_vset_v_bf16m2_bf16m4(vbfloat16m4_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m8_t test_vset_v_bf16m1_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m8_t test_vset_v_bf16m2_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m8_t test_vset_v_bf16m4_bf16m8(vbfloat16m8_t dest, size_t index, + vbfloat16m4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x2_t test_vset_v_bf16mf4_bf16mf4x2(vbfloat16mf4x2_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x3_t test_vset_v_bf16mf4_bf16mf4x3(vbfloat16mf4x3_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x4_t test_vset_v_bf16mf4_bf16mf4x4(vbfloat16mf4x4_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x5_t test_vset_v_bf16mf4_bf16mf4x5(vbfloat16mf4x5_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x6_t test_vset_v_bf16mf4_bf16mf4x6(vbfloat16mf4x6_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x7_t test_vset_v_bf16mf4_bf16mf4x7(vbfloat16mf4x7_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf4x8_t test_vset_v_bf16mf4_bf16mf4x8(vbfloat16mf4x8_t dest, + size_t index, + vbfloat16mf4_t value) { + return __riscv_vset(dest, 0, 
value); +} + +vbfloat16mf2x2_t test_vset_v_bf16mf2_bf16mf2x2(vbfloat16mf2x2_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x3_t test_vset_v_bf16mf2_bf16mf2x3(vbfloat16mf2x3_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x4_t test_vset_v_bf16mf2_bf16mf2x4(vbfloat16mf2x4_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x5_t test_vset_v_bf16mf2_bf16mf2x5(vbfloat16mf2x5_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x6_t test_vset_v_bf16mf2_bf16mf2x6(vbfloat16mf2x6_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x7_t test_vset_v_bf16mf2_bf16mf2x7(vbfloat16mf2x7_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16mf2x8_t test_vset_v_bf16mf2_bf16mf2x8(vbfloat16mf2x8_t dest, + size_t index, + vbfloat16mf2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x2_t test_vset_v_bf16m1_bf16m1x2(vbfloat16m1x2_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x3_t test_vset_v_bf16m1_bf16m1x3(vbfloat16m1x3_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x4_t test_vset_v_bf16m1_bf16m1x4(vbfloat16m1x4_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x5_t test_vset_v_bf16m1_bf16m1x5(vbfloat16m1x5_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x6_t test_vset_v_bf16m1_bf16m1x6(vbfloat16m1x6_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x7_t test_vset_v_bf16m1_bf16m1x7(vbfloat16m1x7_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m1x8_t test_vset_v_bf16m1_bf16m1x8(vbfloat16m1x8_t dest, size_t index, + vbfloat16m1_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m2x2_t test_vset_v_bf16m2_bf16m2x2(vbfloat16m2x2_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m2x3_t test_vset_v_bf16m2_bf16m2x3(vbfloat16m2x3_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m2x4_t test_vset_v_bf16m2_bf16m2x4(vbfloat16m2x4_t dest, size_t index, + vbfloat16m2_t value) { + return __riscv_vset(dest, 0, value); +} + +vbfloat16m4x2_t test_vset_v_bf16m4_bf16m4x2(vbfloat16m4x2_t dest, size_t index, + vbfloat16m4_t value) { + return __riscv_vset(dest, 0, value); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsoxei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsoxei16.c new file mode 100644 index 000000000..c642d0d2c --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsoxei16.c @@ -0,0 +1,62 @@ +#include +#include + +void test_vsoxei16_v_bf16mf4(__bf16 *rs1, vuint16mf4_t rs2, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16mf2(__bf16 *rs1, vuint16mf2_t rs2, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m1(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m2(__bf16 *rs1, vuint16m2_t rs2, 
vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m4(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m8(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vsoxei16(rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsoxei16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t vl) { + return __riscv_vsoxei16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsoxseg2ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg2ei16.c new file mode 100644 index 000000000..04116b6bc --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg2ei16.c @@ -0,0 +1,54 @@ +#include +#include + +void test_vsoxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl) { + return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl) { + return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsoxseg2ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsoxseg3ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg3ei16.c new file mode 100644 index 000000000..5b573578f --- /dev/null +++ 
b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg3ei16.c @@ -0,0 +1,44 @@ +#include +#include + +void test_vsoxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsoxseg3ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsoxseg4ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg4ei16.c new file mode 100644 index 000000000..f20ddb725 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg4ei16.c @@ -0,0 +1,44 @@ +#include +#include + +void test_vsoxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsoxseg4ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsoxseg5ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg5ei16.c new file mode 100644 index 000000000..b3bf5bf37 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg5ei16.c @@ -0,0 +1,34 @@ +#include +#include + +void test_vsoxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl); 
+} + +void test_vsoxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl) { + return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x5_t vs3, + size_t vl) { + return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsoxseg5ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsoxseg6ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg6ei16.c new file mode 100644 index 000000000..271ae1083 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg6ei16.c @@ -0,0 +1,34 @@ +#include +#include + +void test_vsoxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsoxseg6ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsoxseg7ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg7ei16.c new file mode 100644 index 000000000..730c15d38 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg7ei16.c @@ -0,0 +1,34 @@ +#include +#include + +void test_vsoxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl) { + return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl) { + return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsoxseg7ei16(vm, rs1, vs2, vs3, vl); +} diff --git 
a/auto-generated/bfloat16/overloaded-api-testing/vsoxseg8ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg8ei16.c new file mode 100644 index 000000000..51bb463d6 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsoxseg8ei16.c @@ -0,0 +1,34 @@ +#include +#include + +void test_vsoxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsoxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsoxseg8ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsse16.c b/auto-generated/bfloat16/overloaded-api-testing/vsse16.c new file mode 100644 index 000000000..e44dc2415 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsse16.c @@ -0,0 +1,62 @@ +#include +#include + +void test_vsse16_v_bf16mf4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16mf2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m1(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m8(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vsse16(rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m4_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); +} + +void test_vsse16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m8_t vs3, size_t vl) { + return __riscv_vsse16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsseg2e16.c b/auto-generated/bfloat16/overloaded-api-testing/vsseg2e16.c new file mode 100644 index 
000000000..ce09da7a2 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsseg2e16.c @@ -0,0 +1,47 @@ +#include +#include + +void test_vsseg2e16_v_bf16mf4x2(__bf16 *rs1, vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16mf2x2(__bf16 *rs1, vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m1x2(__bf16 *rs1, vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m2x2(__bf16 *rs1, vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m4x2(__bf16 *rs1, vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x2_t vs3, + size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x2_t vs3, + size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); +} + +void test_vsseg2e16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vbfloat16m4x2_t vs3, + size_t vl) { + return __riscv_vsseg2e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsseg3e16.c b/auto-generated/bfloat16/overloaded-api-testing/vsseg3e16.c new file mode 100644 index 000000000..066b28cf2 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsseg3e16.c @@ -0,0 +1,38 @@ +#include +#include + +void test_vsseg3e16_v_bf16mf4x3(__bf16 *rs1, vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16mf2x3(__bf16 *rs1, vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16m1x3(__bf16 *rs1, vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16m2x3(__bf16 *rs1, vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x3_t vs3, + size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); +} + +void test_vsseg3e16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x3_t vs3, + size_t vl) { + return __riscv_vsseg3e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsseg4e16.c b/auto-generated/bfloat16/overloaded-api-testing/vsseg4e16.c new file mode 100644 index 000000000..c0ab986d3 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsseg4e16.c @@ -0,0 +1,38 @@ +#include +#include + +void test_vsseg4e16_v_bf16mf4x4(__bf16 *rs1, vbfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16mf2x4(__bf16 *rs1, vbfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16m1x4(__bf16 *rs1, vbfloat16m1x4_t vs3, size_t vl) { + 
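+  // The unmasked store overload resolves on the vbfloat16m1x4_t tuple
+  // operand alone, so no mask argument is needed to pick this instance.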
return __riscv_vsseg4e16(rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16m2x4(__bf16 *rs1, vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x4_t vs3, + size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); +} + +void test_vsseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vbfloat16m2x4_t vs3, + size_t vl) { + return __riscv_vsseg4e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsseg5e16.c b/auto-generated/bfloat16/overloaded-api-testing/vsseg5e16.c new file mode 100644 index 000000000..2c04b9b7d --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsseg5e16.c @@ -0,0 +1,29 @@ +#include +#include + +void test_vsseg5e16_v_bf16mf4x5(__bf16 *rs1, vbfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); +} + +void test_vsseg5e16_v_bf16mf2x5(__bf16 *rs1, vbfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); +} + +void test_vsseg5e16_v_bf16m1x5(__bf16 *rs1, vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(rs1, vs3, vl); +} + +void test_vsseg5e16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); +} + +void test_vsseg5e16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); +} + +void test_vsseg5e16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x5_t vs3, + size_t vl) { + return __riscv_vsseg5e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsseg6e16.c b/auto-generated/bfloat16/overloaded-api-testing/vsseg6e16.c new file mode 100644 index 000000000..fe537164b --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsseg6e16.c @@ -0,0 +1,29 @@ +#include +#include + +void test_vsseg6e16_v_bf16mf4x6(__bf16 *rs1, vbfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); +} + +void test_vsseg6e16_v_bf16mf2x6(__bf16 *rs1, vbfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); +} + +void test_vsseg6e16_v_bf16m1x6(__bf16 *rs1, vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(rs1, vs3, vl); +} + +void test_vsseg6e16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); +} + +void test_vsseg6e16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x6_t vs3, size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); +} + +void test_vsseg6e16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x6_t vs3, + size_t vl) { + return __riscv_vsseg6e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsseg7e16.c b/auto-generated/bfloat16/overloaded-api-testing/vsseg7e16.c new file mode 100644 index 000000000..36f79a388 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsseg7e16.c @@ -0,0 +1,29 @@ +#include +#include + +void test_vsseg7e16_v_bf16mf4x7(__bf16 *rs1, vbfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); +} + +void test_vsseg7e16_v_bf16mf2x7(__bf16 *rs1, vbfloat16mf2x7_t vs3, size_t vl) { + return 
__riscv_vsseg7e16(rs1, vs3, vl); +} + +void test_vsseg7e16_v_bf16m1x7(__bf16 *rs1, vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(rs1, vs3, vl); +} + +void test_vsseg7e16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); +} + +void test_vsseg7e16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); +} + +void test_vsseg7e16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x7_t vs3, + size_t vl) { + return __riscv_vsseg7e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsseg8e16.c b/auto-generated/bfloat16/overloaded-api-testing/vsseg8e16.c new file mode 100644 index 000000000..c6a631a0e --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsseg8e16.c @@ -0,0 +1,29 @@ +#include +#include + +void test_vsseg8e16_v_bf16mf4x8(__bf16 *rs1, vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); +} + +void test_vsseg8e16_v_bf16mf2x8(__bf16 *rs1, vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); +} + +void test_vsseg8e16_v_bf16m1x8(__bf16 *rs1, vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(rs1, vs3, vl); +} + +void test_vsseg8e16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, + vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); +} + +void test_vsseg8e16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); +} + +void test_vsseg8e16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vbfloat16m1x8_t vs3, + size_t vl) { + return __riscv_vsseg8e16(vm, rs1, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vssseg2e16.c b/auto-generated/bfloat16/overloaded-api-testing/vssseg2e16.c new file mode 100644 index 000000000..28d76f0ae --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vssseg2e16.c @@ -0,0 +1,52 @@ +#include +#include + +void test_vssseg2e16_v_bf16mf4x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16mf2x2(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16m1x2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x2_t vs3, + size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16m2x2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x2_t vs3, + size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16m4x2(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4x2_t vs3, + size_t vl) { + return __riscv_vssseg2e16(rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg2e16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, + 
vbfloat16m4x2_t vs3, size_t vl) {
+  return __riscv_vssseg2e16(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/overloaded-api-testing/vssseg3e16.c b/auto-generated/bfloat16/overloaded-api-testing/vssseg3e16.c
new file mode 100644
index 000000000..445145245
--- /dev/null
+++ b/auto-generated/bfloat16/overloaded-api-testing/vssseg3e16.c
@@ -0,0 +1,42 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vssseg3e16_v_bf16mf4x3(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf4x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16mf2x3(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16m1x3(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x3_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg3e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16m2x3(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x3_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg3e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf4x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m1x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg3e16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m2x3_t vs3, size_t vl) {
+  return __riscv_vssseg3e16(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/overloaded-api-testing/vssseg4e16.c b/auto-generated/bfloat16/overloaded-api-testing/vssseg4e16.c
new file mode 100644
index 000000000..98d2b433a
--- /dev/null
+++ b/auto-generated/bfloat16/overloaded-api-testing/vssseg4e16.c
@@ -0,0 +1,42 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vssseg4e16_v_bf16mf4x4(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16mf2x4(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16m1x4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x4_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg4e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16m2x4(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x4_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg4e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf4x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m1x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg4e16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m2x4_t vs3, size_t vl) {
+  return __riscv_vssseg4e16(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/overloaded-api-testing/vssseg5e16.c b/auto-generated/bfloat16/overloaded-api-testing/vssseg5e16.c
new file mode 100644
index 000000000..d6f27bf5e
--- /dev/null
+++ b/auto-generated/bfloat16/overloaded-api-testing/vssseg5e16.c
@@ -0,0 +1,32 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vssseg5e16_v_bf16mf4x5(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16mf2x5(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16m1x5(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x5_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg5e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf4x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf2x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg5e16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m1x5_t vs3, size_t vl) {
+  return __riscv_vssseg5e16(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/overloaded-api-testing/vssseg6e16.c b/auto-generated/bfloat16/overloaded-api-testing/vssseg6e16.c
new file mode 100644
index 000000000..ad952f272
--- /dev/null
+++ b/auto-generated/bfloat16/overloaded-api-testing/vssseg6e16.c
@@ -0,0 +1,32 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vssseg6e16_v_bf16mf4x6(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16mf2x6(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16m1x6(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x6_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg6e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf4x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf2x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg6e16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m1x6_t vs3, size_t vl) {
+  return __riscv_vssseg6e16(vm, rs1, rs2, vs3, vl);
+}
diff --git a/auto-generated/bfloat16/overloaded-api-testing/vssseg7e16.c b/auto-generated/bfloat16/overloaded-api-testing/vssseg7e16.c
new file mode 100644
index 000000000..b84d2b9db
--- /dev/null
+++ b/auto-generated/bfloat16/overloaded-api-testing/vssseg7e16.c
@@ -0,0 +1,32 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void test_vssseg7e16_v_bf16mf4x7(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16mf2x7(__bf16 *rs1, ptrdiff_t rs2,
+                                 vbfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16m1x7(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x7_t vs3,
+                                size_t vl) {
+  return __riscv_vssseg7e16(rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf4x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                   vbfloat16mf2x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl);
+}
+
+void test_vssseg7e16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2,
+                                  vbfloat16m1x7_t vs3, size_t vl) {
+  return __riscv_vssseg7e16(vm, rs1, rs2, vs3, vl);
+}
diff --git
a/auto-generated/bfloat16/overloaded-api-testing/vssseg8e16.c b/auto-generated/bfloat16/overloaded-api-testing/vssseg8e16.c new file mode 100644 index 000000000..195be7c8e --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vssseg8e16.c @@ -0,0 +1,32 @@ +#include +#include + +void test_vssseg8e16_v_bf16mf4x8(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); +} + +void test_vssseg8e16_v_bf16mf2x8(__bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); +} + +void test_vssseg8e16_v_bf16m1x8(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x8_t vs3, + size_t vl) { + return __riscv_vssseg8e16(rs1, rs2, vs3, vl); +} + +void test_vssseg8e16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg8e16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); +} + +void test_vssseg8e16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vssseg8e16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsuxei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsuxei16.c new file mode 100644 index 000000000..4236a87c0 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsuxei16.c @@ -0,0 +1,62 @@ +#include +#include + +void test_vsuxei16_v_bf16mf4(__bf16 *rs1, vuint16mf4_t rs2, vbfloat16mf4_t vs3, + size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16mf2(__bf16 *rs1, vuint16mf2_t rs2, vbfloat16mf2_t vs3, + size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m1(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m2(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m4(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m8(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl) { + return __riscv_vsuxei16(rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16mf4_m(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16mf2_m(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m1_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m4_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); +} + +void test_vsuxei16_v_bf16m8_m(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t vl) { + return __riscv_vsuxei16(vm, rs1, rs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsuxseg2ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg2ei16.c new 
file mode 100644 index 000000000..df05ac74e --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg2ei16.c @@ -0,0 +1,54 @@ +#include +#include + +void test_vsuxseg2ei16_v_bf16mf4x2(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16mf2x2(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16m1x2(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16m2x2(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16m4x2(__bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16mf4x2_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl) { + return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16mf2x2_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl) { + return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16m1x2_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16m2x2_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg2ei16_v_bf16m4x2_m(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl) { + return __riscv_vsuxseg2ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsuxseg3ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg3ei16.c new file mode 100644 index 000000000..6ca09eb44 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg3ei16.c @@ -0,0 +1,44 @@ +#include +#include + +void test_vsuxseg3ei16_v_bf16mf4x3(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16mf2x3(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16m1x3(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16m2x3(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16mf4x3_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16mf2x3_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16m1x3_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg3ei16_v_bf16m2x3_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl) { + return __riscv_vsuxseg3ei16(vm, rs1, vs2, vs3, vl); +} diff --git 
a/auto-generated/bfloat16/overloaded-api-testing/vsuxseg4ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg4ei16.c new file mode 100644 index 000000000..15d0841c3 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg4ei16.c @@ -0,0 +1,44 @@ +#include +#include + +void test_vsuxseg4ei16_v_bf16mf4x4(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16mf2x4(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16m1x4(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16m2x4(__bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16mf4x4_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16mf2x4_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16m1x4_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg4ei16_v_bf16m2x4_m(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl) { + return __riscv_vsuxseg4ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsuxseg5ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg5ei16.c new file mode 100644 index 000000000..7467e5cd1 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg5ei16.c @@ -0,0 +1,34 @@ +#include +#include + +void test_vsuxseg5ei16_v_bf16mf4x5(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg5ei16_v_bf16mf2x5(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg5ei16_v_bf16m1x5(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg5ei16_v_bf16mf4x5_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl) { + return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg5ei16_v_bf16mf2x5_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x5_t vs3, + size_t vl) { + return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg5ei16_v_bf16m1x5_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl) { + return __riscv_vsuxseg5ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsuxseg6ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg6ei16.c new file mode 100644 index 000000000..437c9778f --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg6ei16.c @@ -0,0 +1,34 @@ +#include +#include + +void test_vsuxseg6ei16_v_bf16mf4x6(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg6ei16_v_bf16mf2x6(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl) { + return 
__riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg6ei16_v_bf16m1x6(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg6ei16_v_bf16mf4x6_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl) { + return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg6ei16_v_bf16mf2x6_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl) { + return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg6ei16_v_bf16m1x6_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl) { + return __riscv_vsuxseg6ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsuxseg7ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg7ei16.c new file mode 100644 index 000000000..7e86d2539 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg7ei16.c @@ -0,0 +1,34 @@ +#include +#include + +void test_vsuxseg7ei16_v_bf16mf4x7(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg7ei16_v_bf16mf2x7(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg7ei16_v_bf16m1x7(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg7ei16_v_bf16mf4x7_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg7ei16_v_bf16mf2x7_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg7ei16_v_bf16m1x7_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl) { + return __riscv_vsuxseg7ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded-api-testing/vsuxseg8ei16.c b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg8ei16.c new file mode 100644 index 000000000..eaaae3645 --- /dev/null +++ b/auto-generated/bfloat16/overloaded-api-testing/vsuxseg8ei16.c @@ -0,0 +1,34 @@ +#include +#include + +void test_vsuxseg8ei16_v_bf16mf4x8(__bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg8ei16_v_bf16mf2x8(__bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg8ei16_v_bf16m1x8(__bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei16(rs1, vs2, vs3, vl); +} + +void test_vsuxseg8ei16_v_bf16mf4x8_m(vbool64_t vm, __bf16 *rs1, + vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl) { + return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg8ei16_v_bf16mf2x8_m(vbool32_t vm, __bf16 *rs1, + vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl) { + return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl); +} + +void test_vsuxseg8ei16_v_bf16m1x8_m(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl) { + return __riscv_vsuxseg8ei16(vm, rs1, vs2, vs3, vl); +} diff --git a/auto-generated/bfloat16/overloaded_intrinsic_funcs.adoc b/auto-generated/bfloat16/overloaded_intrinsic_funcs.adoc new file mode 100644 index 000000000..270a88ffa --- 
/dev/null +++ b/auto-generated/bfloat16/overloaded_intrinsic_funcs.adoc @@ -0,0 +1,1349 @@ + +=== BFloat16 Vector Loads and Stores Intrinsics + +[[overloaded-bf16-vector-unit-stride-load]] +==== Vector Unit-Stride Load Intrinsics + +[,c] +---- +// masked functions +vbfloat16mf4_t __riscv_vle16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16(vbool8_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16(vbool4_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16(vbool2_t vm, const __bf16 *rs1, size_t vl); +---- + +[[overloaded-bf16-vector-unit-stride-store]] +==== Vector Unit-Stride Store Intrinsics + +[,c] +---- +void __riscv_vse16(__bf16 *rs1, vbfloat16mf4_t vs3, size_t vl); +void __riscv_vse16(__bf16 *rs1, vbfloat16mf2_t vs3, size_t vl); +void __riscv_vse16(__bf16 *rs1, vbfloat16m1_t vs3, size_t vl); +void __riscv_vse16(__bf16 *rs1, vbfloat16m2_t vs3, size_t vl); +void __riscv_vse16(__bf16 *rs1, vbfloat16m4_t vs3, size_t vl); +void __riscv_vse16(__bf16 *rs1, vbfloat16m8_t vs3, size_t vl); +// masked functions +void __riscv_vse16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4_t vs3, size_t vl); +void __riscv_vse16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2_t vs3, size_t vl); +void __riscv_vse16(vbool16_t vm, __bf16 *rs1, vbfloat16m1_t vs3, size_t vl); +void __riscv_vse16(vbool8_t vm, __bf16 *rs1, vbfloat16m2_t vs3, size_t vl); +void __riscv_vse16(vbool4_t vm, __bf16 *rs1, vbfloat16m4_t vs3, size_t vl); +void __riscv_vse16(vbool2_t vm, __bf16 *rs1, vbfloat16m8_t vs3, size_t vl); +---- + +[[overloaded-vector-strided-load]] +==== Vector Strided Load Intrinsics + +[,c] +---- +// masked functions +vbfloat16mf4_t __riscv_vlse16(vbool64_t vm, const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vlse16(vbool32_t vm, const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vlse16(vbool16_t vm, const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vlse16(vbool8_t vm, const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vlse16(vbool4_t vm, const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vlse16(vbool2_t vm, const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +---- + +[[overloaded-vector-strided-store]] +==== Vector Strided Store Intrinsics + +[,c] +---- +void __riscv_vsse16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsse16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsse16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1_t vs3, size_t vl); +void __riscv_vsse16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2_t vs3, size_t vl); +void __riscv_vsse16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4_t vs3, size_t vl); +void __riscv_vsse16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m8_t vs3, size_t vl); +// masked functions +void __riscv_vsse16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsse16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsse16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vsse16(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vsse16(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vsse16(vbool2_t vm, __bf16 *rs1, ptrdiff_t rs2, vbfloat16m8_t vs3, + size_t vl); +---- + 
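
The listings above are the overloaded counterparts of the type-suffixed bf16 load/store intrinsics: stores resolve the element type from the vector operand `vs3`, while loads have no vector argument to resolve on and therefore appear only in masked form, where the `vbool*_t` mask type selects the return type. A minimal usage sketch follows; it is not part of the generated specification, and the function name `scatter_row`, its parameters, and the loop structure are illustrative assumptions (a toolchain with the `Zvfbfmin` extension enabled is also assumed):

[,c]
----
#include <riscv_vector.h>
#include <stddef.h>

// Illustrative sketch: scatter a contiguous row of n bf16 values into a
// column of a column-major matrix via the overloaded strided store.
// stride_bytes is the byte distance between consecutive column elements,
// assumed positive and a multiple of sizeof(__bf16).
void scatter_row(__bf16 *dst, ptrdiff_t stride_bytes, const __bf16 *src,
                 size_t n) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e16m1(n);
    // Unit-stride load: the non-overloaded name is used here because the
    // unmasked overloaded form does not exist for loads.
    vbfloat16m1_t v = __riscv_vle16_v_bf16m1(src, vl);
    // Overloaded strided store: the bf16m1 element type is deduced from v.
    __riscv_vsse16(dst, stride_bytes, v, vl);
    src += vl;
    dst += vl * (size_t)(stride_bytes / (ptrdiff_t)sizeof(__bf16));
    n -= vl;
  }
}
----

The same resolution rule explains the shape of every masked listing in these sections: the mask is always the first argument, and for loads it is what disambiguates the overload.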
+[[overloaded-vector-indexed-load]] +==== Vector Indexed Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vloxei16(const __bf16 *rs1, vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16(const __bf16 *rs1, vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16(const __bf16 *rs1, vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vloxei16(const __bf16 *rs1, vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vloxei16(const __bf16 *rs1, vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vloxei16(const __bf16 *rs1, vuint16m8_t rs2, size_t vl); +vbfloat16mf4_t __riscv_vluxei16(const __bf16 *rs1, vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16(const __bf16 *rs1, vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16(const __bf16 *rs1, vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vluxei16(const __bf16 *rs1, vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vluxei16(const __bf16 *rs1, vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vluxei16(const __bf16 *rs1, vuint16m8_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16(vbool16_t vm, const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16(vbool8_t vm, const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16(vbool4_t vm, const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16(vbool2_t vm, const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16(vbool16_t vm, const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16(vbool8_t vm, const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16(vbool4_t vm, const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16(vbool2_t vm, const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +---- + +[[overloaded-vector-indexed-store]] +==== Vector Indexed Store Intrinsics + +[,c] +---- +void __riscv_vsoxei16(__bf16 *rs1, vuint16mf4_t rs2, vbfloat16mf4_t vs3, + size_t vl); +void __riscv_vsoxei16(__bf16 *rs1, vuint16mf2_t rs2, vbfloat16mf2_t vs3, + size_t vl); +void __riscv_vsoxei16(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vsoxei16(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vsoxei16(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vsoxei16(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl); +void __riscv_vsuxei16(__bf16 *rs1, vuint16mf4_t rs2, vbfloat16mf4_t vs3, + size_t vl); +void __riscv_vsuxei16(__bf16 *rs1, vuint16mf2_t rs2, vbfloat16mf2_t vs3, + size_t vl); +void __riscv_vsuxei16(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vsuxei16(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vsuxei16(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vsuxei16(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl); +// masked functions +void __riscv_vsoxei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsoxei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t 
rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsoxei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl); +void __riscv_vsoxei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl); +void __riscv_vsoxei16(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl); +void __riscv_vsoxei16(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t vl); +void __riscv_vsuxei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsuxei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsuxei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl); +void __riscv_vsuxei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl); +void __riscv_vsuxei16(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl); +void __riscv_vsuxei16(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t vl); +---- + +[[overloaded-unit-stride-fault-only-first-loads]] +==== Unit-stride Fault-Only-First Loads Intrinsics + +[,c] +---- +// masked functions +vbfloat16mf4_t __riscv_vle16ff(vbool64_t vm, const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff(vbool32_t vm, const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff(vbool16_t vm, const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2_t __riscv_vle16ff(vbool8_t vm, const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4_t __riscv_vle16ff(vbool4_t vm, const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m8_t __riscv_vle16ff(vbool2_t vm, const __bf16 *rs1, size_t *new_vl, + size_t vl); +---- + +=== BFloat16 Vector Loads and Stores Segment Intrinsics + +[[overloaded-vector-unit-stride-segment-load]] +==== Vector Unit-Stride Segment Load Intrinsics + +[,c] +---- +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16(vbool16_t vm, const __bf16 *rs1, size_t 
vl); +vbfloat16m1x8_t __riscv_vlseg8e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16(vbool8_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16(vbool8_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16(vbool8_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16(vbool4_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff(vbool4_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +---- + +[[overloaded-vecrtor-unit-stride-segment-store]] +==== Vector Unit-Stride Segment Store Intrinsics + +[,c] +---- +void __riscv_vsseg2e16(__bf16 *rs1, vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vsseg3e16(__bf16 *rs1, vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vsseg4e16(__bf16 *rs1, vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vsseg5e16(__bf16 *rs1, vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vsseg6e16(__bf16 *rs1, vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vsseg7e16(__bf16 *rs1, vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vsseg8e16(__bf16 *rs1, vbfloat16mf4x8_t vs3, size_t vl); +void 
__riscv_vsseg2e16(__bf16 *rs1, vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vsseg3e16(__bf16 *rs1, vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vsseg4e16(__bf16 *rs1, vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vsseg5e16(__bf16 *rs1, vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vsseg6e16(__bf16 *rs1, vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vsseg7e16(__bf16 *rs1, vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vsseg8e16(__bf16 *rs1, vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vsseg2e16(__bf16 *rs1, vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsseg3e16(__bf16 *rs1, vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsseg4e16(__bf16 *rs1, vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsseg5e16(__bf16 *rs1, vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsseg6e16(__bf16 *rs1, vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsseg7e16(__bf16 *rs1, vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsseg8e16(__bf16 *rs1, vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsseg2e16(__bf16 *rs1, vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsseg3e16(__bf16 *rs1, vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vsseg4e16(__bf16 *rs1, vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsseg2e16(__bf16 *rs1, vbfloat16m4x2_t vs3, size_t vl); +// masked functions +void __riscv_vsseg2e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vsseg3e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x3_t vs3, + size_t vl); +void __riscv_vsseg4e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vsseg5e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vsseg6e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vsseg7e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vsseg8e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vsseg2e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vsseg3e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vsseg4e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vsseg5e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x5_t vs3, + size_t vl); +void __riscv_vsseg6e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vsseg7e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vsseg8e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vsseg2e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x2_t vs3, + size_t vl); +void __riscv_vsseg3e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x3_t vs3, + size_t vl); +void __riscv_vsseg4e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x4_t vs3, + size_t vl); +void __riscv_vsseg5e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x5_t vs3, + size_t vl); +void __riscv_vsseg6e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x6_t vs3, + size_t vl); +void __riscv_vsseg7e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x7_t vs3, + size_t vl); +void __riscv_vsseg8e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x8_t vs3, + size_t vl); +void __riscv_vsseg2e16(vbool8_t vm, __bf16 *rs1, vbfloat16m2x2_t vs3, + size_t vl); +void __riscv_vsseg3e16(vbool8_t vm, __bf16 *rs1, vbfloat16m2x3_t vs3, + size_t vl); +void __riscv_vsseg4e16(vbool8_t vm, __bf16 *rs1, vbfloat16m2x4_t vs3, + size_t vl); +void __riscv_vsseg2e16(vbool4_t vm, __bf16 *rs1, vbfloat16m4x2_t vs3, + size_t vl); +---- + +[[overloaded-vector-strided-segment-load]] +==== Vector Strided Segment Load Intrinsics + +[,c] +---- 
+// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16(vbool4_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +---- + +[[overloaded-vector-strided-segment-store]] +==== Vector Strided Segment Store Intrinsics + +[,c] +---- +void __riscv_vssseg2e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vssseg3e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x3_t vs3, + size_t vl); +void __riscv_vssseg4e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vssseg5e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vssseg6e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vssseg7e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vssseg8e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vssseg2e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vssseg3e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vssseg4e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vssseg5e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x5_t 
vs3, + size_t vl); +void __riscv_vssseg6e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vssseg7e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vssseg8e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vssseg2e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x2_t vs3, + size_t vl); +void __riscv_vssseg3e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x3_t vs3, + size_t vl); +void __riscv_vssseg4e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x4_t vs3, + size_t vl); +void __riscv_vssseg5e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x5_t vs3, + size_t vl); +void __riscv_vssseg6e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x6_t vs3, + size_t vl); +void __riscv_vssseg7e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x7_t vs3, + size_t vl); +void __riscv_vssseg8e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x8_t vs3, + size_t vl); +void __riscv_vssseg2e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x2_t vs3, + size_t vl); +void __riscv_vssseg3e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x3_t vs3, + size_t vl); +void __riscv_vssseg4e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x4_t vs3, + size_t vl); +void __riscv_vssseg2e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4x2_t vs3, + size_t vl); +// masked functions +void __riscv_vssseg2e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vssseg3e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vssseg4e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vssseg5e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vssseg6e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vssseg7e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vssseg8e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vssseg2e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vssseg3e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vssseg4e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vssseg5e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vssseg6e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vssseg7e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vssseg8e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vssseg2e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vssseg3e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vssseg4e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vssseg5e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vssseg6e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vssseg7e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vssseg8e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vssseg2e16(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x2_t vs3, size_t vl); +void 
__riscv_vssseg3e16(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vssseg4e16(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vssseg2e16(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m4x2_t vs3, size_t vl); +---- + +[[overloaded-vector-indexed-segment-load]] +==== Vector Indexed Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vloxseg2ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t 
__riscv_vluxseg5ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16(vbool8_t vm, 
const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +---- + +[[overloaded-vector-indexed-segment-store]] +==== Vector Indexed Segment Store Intrinsics + +[,c] +---- +void __riscv_vsoxseg2ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vsoxseg5ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vsoxseg6ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vsoxseg7ei16(__bf16 *rs1, 
vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vsoxseg8ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vsoxseg5ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x5_t vs3, + size_t vl); +void __riscv_vsoxseg6ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vsoxseg7ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vsoxseg8ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x4_t vs3, + size_t vl); +void __riscv_vsoxseg5ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x5_t vs3, + size_t vl); +void __riscv_vsoxseg6ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x6_t vs3, + size_t vl); +void __riscv_vsoxseg7ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x7_t vs3, + size_t vl); +void __riscv_vsoxseg8ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x8_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16(__bf16 *rs1, vuint16m2_t vs2, vbfloat16m2x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16(__bf16 *rs1, vuint16m2_t vs2, vbfloat16m2x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16(__bf16 *rs1, vuint16m2_t vs2, vbfloat16m2x4_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16(__bf16 *rs1, vuint16m4_t vs2, vbfloat16m4x2_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vsuxseg5ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vsuxseg6ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vsuxseg7ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vsuxseg8ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vsuxseg5ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x5_t vs3, + size_t vl); +void __riscv_vsuxseg6ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vsuxseg7ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vsuxseg8ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x4_t vs3, + size_t vl); +void __riscv_vsuxseg5ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x5_t vs3, + size_t vl); +void __riscv_vsuxseg6ei16(__bf16 *rs1, vuint16m1_t 
vs2, vbfloat16m1x6_t vs3, + size_t vl); +void __riscv_vsuxseg7ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x7_t vs3, + size_t vl); +void __riscv_vsuxseg8ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x8_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16(__bf16 *rs1, vuint16m2_t vs2, vbfloat16m2x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16(__bf16 *rs1, vuint16m2_t vs2, vbfloat16m2x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16(__bf16 *rs1, vuint16m2_t vs2, vbfloat16m2x4_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16(__bf16 *rs1, vuint16m4_t vs2, vbfloat16m4x2_t vs3, + size_t vl); +// masked functions +void __riscv_vsoxseg2ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vsoxseg5ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vsoxseg6ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vsoxseg7ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vsoxseg8ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vsoxseg2ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vsoxseg5ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vsoxseg6ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vsoxseg7ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vsoxseg8ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vsoxseg2ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsoxseg5ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsoxseg6ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsoxseg7ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsoxseg8ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsoxseg2ei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsoxseg2ei16(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl); +void __riscv_vsuxseg2ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16(vbool64_t vm, __bf16 
*rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vsuxseg5ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vsuxseg6ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vsuxseg7ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vsuxseg8ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vsuxseg2ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vsuxseg5ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vsuxseg6ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vsuxseg7ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vsuxseg8ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vsuxseg2ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsuxseg5ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsuxseg6ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsuxseg7ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsuxseg8ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsuxseg2ei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsuxseg2ei16(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl); +---- + +=== BFloat16 Convert Intrinsics + +[[overloaded-bf16-vector-narrow-convert]] +==== Vector Narrowing Convert Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vfncvtbf16_f(vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f(vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f(vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f(vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f(vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f(vbool64_t vm, vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f(vbool32_t vm, vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f(vbool16_t vm, vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f(vbool8_t vm, vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f(vbool4_t vm, vfloat32m8_t vs2, size_t vl); +vbfloat16mf4_t __riscv_vfncvtbf16_f(vfloat32mf2_t vs2, unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f(vfloat32m1_t vs2, unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f(vfloat32m2_t vs2, unsigned int frm, 
+ size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f(vfloat32m4_t vs2, unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f(vfloat32m8_t vs2, unsigned int frm, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f(vbool64_t vm, vfloat32mf2_t vs2, + unsigned int frm, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f(vbool32_t vm, vfloat32m1_t vs2, + unsigned int frm, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f(vbool16_t vm, vfloat32m2_t vs2, + unsigned int frm, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f(vbool8_t vm, vfloat32m4_t vs2, + unsigned int frm, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f(vbool4_t vm, vfloat32m8_t vs2, + unsigned int frm, size_t vl); +---- + +[[overloaded-bf16-vector-widening-convert]] +==== Vector Widening Convert Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwcvtbf16_f(vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f(vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f(vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f(vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f(vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f(vbool64_t vm, vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f(vbool32_t vm, vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f(vbool16_t vm, vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f(vbool8_t vm, vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f(vbool4_t vm, vbfloat16m4_t vs2, size_t vl); +---- + +=== BFloat16 Arithmetic Intrinsics + +[[overloaded-bf16-widening-multiply-accumulate]] +==== Vector Widening Multiply-Accumulate Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwmaccbf16(vfloat32mf2_t vd, vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vfloat32m2_t vd, __bf16 vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vfloat32m4_t vd, __bf16 vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vfloat32m8_t vd, __bf16 vs1, vbfloat16m4_t vs2, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16(vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, 
vbfloat16m2_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16(vfloat32mf2_t vd, vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, unsigned int frm, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, unsigned int frm, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vfloat32m2_t vd, __bf16 vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vfloat32m4_t vd, __bf16 vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vfloat32m8_t vd, __bf16 vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16(vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, unsigned int frm, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +---- + +[[overloaded-vector-bf16-move]] +==== Vector BFloat16 Move Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmv_v(vbfloat16mf4_t vs1, size_t vl); +vbfloat16mf2_t __riscv_vmv_v(vbfloat16mf2_t vs1, size_t vl); +vbfloat16m1_t __riscv_vmv_v(vbfloat16m1_t vs1, size_t vl); +vbfloat16m2_t __riscv_vmv_v(vbfloat16m2_t vs1, size_t vl); +vbfloat16m4_t __riscv_vmv_v(vbfloat16m4_t vs1, size_t vl); +vbfloat16m8_t __riscv_vmv_v(vbfloat16m8_t vs1, size_t vl); +---- + +[[overloaded-vector-bf16-merge]] +==== Vector BFloat16 Merge Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmerge(vbfloat16mf4_t vs2, vbfloat16mf4_t vs1, + 
vbool64_t v0, size_t vl); +vbfloat16mf2_t __riscv_vmerge(vbfloat16mf2_t vs2, vbfloat16mf2_t vs1, + vbool32_t v0, size_t vl); +vbfloat16m1_t __riscv_vmerge(vbfloat16m1_t vs2, vbfloat16m1_t vs1, vbool16_t v0, + size_t vl); +vbfloat16m2_t __riscv_vmerge(vbfloat16m2_t vs2, vbfloat16m2_t vs1, vbool8_t v0, + size_t vl); +vbfloat16m4_t __riscv_vmerge(vbfloat16m4_t vs2, vbfloat16m4_t vs1, vbool4_t v0, + size_t vl); +vbfloat16m8_t __riscv_vmerge(vbfloat16m8_t vs2, vbfloat16m8_t vs1, vbool2_t v0, + size_t vl); +---- + +=== BFloat16 Miscellaneous Vector Utility Intrinsics + +[[overloaded-reinterpret-cast-conversion]] +==== Reinterpret Cast Conversion Intrinsics + +[,c] +---- +// Reinterpret between different types under the same SEW/LMUL +vbfloat16mf4_t __riscv_vreinterpret_bf16mf4(vint16mf4_t src); +vbfloat16mf2_t __riscv_vreinterpret_bf16mf2(vint16mf2_t src); +vbfloat16m1_t __riscv_vreinterpret_bf16m1(vint16m1_t src); +vbfloat16m2_t __riscv_vreinterpret_bf16m2(vint16m2_t src); +vbfloat16m4_t __riscv_vreinterpret_bf16m4(vint16m4_t src); +vbfloat16m8_t __riscv_vreinterpret_bf16m8(vint16m8_t src); +vbfloat16mf4_t __riscv_vreinterpret_bf16mf4(vuint16mf4_t src); +vbfloat16mf2_t __riscv_vreinterpret_bf16mf2(vuint16mf2_t src); +vbfloat16m1_t __riscv_vreinterpret_bf16m1(vuint16m1_t src); +vbfloat16m2_t __riscv_vreinterpret_bf16m2(vuint16m2_t src); +vbfloat16m4_t __riscv_vreinterpret_bf16m4(vuint16m4_t src); +vbfloat16m8_t __riscv_vreinterpret_bf16m8(vuint16m8_t src); +vint16mf4_t __riscv_vreinterpret_i16mf4(vbfloat16mf4_t src); +vint16mf2_t __riscv_vreinterpret_i16mf2(vbfloat16mf2_t src); +vint16m1_t __riscv_vreinterpret_i16m1(vbfloat16m1_t src); +vint16m2_t __riscv_vreinterpret_i16m2(vbfloat16m2_t src); +vint16m4_t __riscv_vreinterpret_i16m4(vbfloat16m4_t src); +vint16m8_t __riscv_vreinterpret_i16m8(vbfloat16m8_t src); +vuint16mf4_t __riscv_vreinterpret_u16mf4(vbfloat16mf4_t src); +vuint16mf2_t __riscv_vreinterpret_u16mf2(vbfloat16mf2_t src); +vuint16m1_t __riscv_vreinterpret_u16m1(vbfloat16m1_t src); +vuint16m2_t __riscv_vreinterpret_u16m2(vbfloat16m2_t src); +vuint16m4_t __riscv_vreinterpret_u16m4(vbfloat16m4_t src); +vuint16m8_t __riscv_vreinterpret_u16m8(vbfloat16m8_t src); +---- + +[[overloaded-vector-lmul-extension]] +==== Vector LMUL Extension Intrinsics + +[,c] +---- +vbfloat16mf2_t __riscv_vlmul_ext_bf16mf2(vbfloat16mf4_t value); +vbfloat16m1_t __riscv_vlmul_ext_bf16m1(vbfloat16mf4_t value); +vbfloat16m2_t __riscv_vlmul_ext_bf16m2(vbfloat16mf4_t value); +vbfloat16m4_t __riscv_vlmul_ext_bf16m4(vbfloat16mf4_t value); +vbfloat16m8_t __riscv_vlmul_ext_bf16m8(vbfloat16mf4_t value); +vbfloat16m1_t __riscv_vlmul_ext_bf16m1(vbfloat16mf2_t value); +vbfloat16m2_t __riscv_vlmul_ext_bf16m2(vbfloat16mf2_t value); +vbfloat16m4_t __riscv_vlmul_ext_bf16m4(vbfloat16mf2_t value); +vbfloat16m8_t __riscv_vlmul_ext_bf16m8(vbfloat16mf2_t value); +vbfloat16m2_t __riscv_vlmul_ext_bf16m2(vbfloat16m1_t value); +vbfloat16m4_t __riscv_vlmul_ext_bf16m4(vbfloat16m1_t value); +vbfloat16m8_t __riscv_vlmul_ext_bf16m8(vbfloat16m1_t value); +vbfloat16m4_t __riscv_vlmul_ext_bf16m4(vbfloat16m2_t value); +vbfloat16m8_t __riscv_vlmul_ext_bf16m8(vbfloat16m2_t value); +vbfloat16m8_t __riscv_vlmul_ext_bf16m8(vbfloat16m4_t value); +---- + +[[overloaded-vector-lmul-truncation]] +==== Vector LMUL Truncation Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vlmul_trunc_bf16mf4(vbfloat16mf2_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_bf16mf4(vbfloat16m1_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_bf16mf2(vbfloat16m1_t value); 
+vbfloat16mf4_t __riscv_vlmul_trunc_bf16mf4(vbfloat16m2_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_bf16mf2(vbfloat16m2_t value); +vbfloat16m1_t __riscv_vlmul_trunc_bf16m1(vbfloat16m2_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_bf16mf4(vbfloat16m4_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_bf16mf2(vbfloat16m4_t value); +vbfloat16m1_t __riscv_vlmul_trunc_bf16m1(vbfloat16m4_t value); +vbfloat16m2_t __riscv_vlmul_trunc_bf16m2(vbfloat16m4_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_bf16mf4(vbfloat16m8_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_bf16mf2(vbfloat16m8_t value); +vbfloat16m1_t __riscv_vlmul_trunc_bf16m1(vbfloat16m8_t value); +vbfloat16m2_t __riscv_vlmul_trunc_bf16m2(vbfloat16m8_t value); +vbfloat16m4_t __riscv_vlmul_trunc_bf16m4(vbfloat16m8_t value); +---- + +[[overloaded-vector-initialization]] +==== Vector Initialization Intrinsics +Intrinsics here don't have an overloaded variant. + +[[overloaded-vector-insertion]] +==== Vector Insertion Intrinsics + +[,c] +---- +vbfloat16m2_t __riscv_vset(vbfloat16m2_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m4_t __riscv_vset(vbfloat16m4_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m4_t __riscv_vset(vbfloat16m4_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m8_t __riscv_vset(vbfloat16m8_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m8_t __riscv_vset(vbfloat16m8_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m8_t __riscv_vset(vbfloat16m8_t dest, size_t index, + vbfloat16m4_t value); +vbfloat16mf4x2_t __riscv_vset(vbfloat16mf4x2_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x3_t __riscv_vset(vbfloat16mf4x3_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x4_t __riscv_vset(vbfloat16mf4x4_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x5_t __riscv_vset(vbfloat16mf4x5_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x6_t __riscv_vset(vbfloat16mf4x6_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x7_t __riscv_vset(vbfloat16mf4x7_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x8_t __riscv_vset(vbfloat16mf4x8_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf2x2_t __riscv_vset(vbfloat16mf2x2_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x3_t __riscv_vset(vbfloat16mf2x3_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x4_t __riscv_vset(vbfloat16mf2x4_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x5_t __riscv_vset(vbfloat16mf2x5_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x6_t __riscv_vset(vbfloat16mf2x6_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x7_t __riscv_vset(vbfloat16mf2x7_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x8_t __riscv_vset(vbfloat16mf2x8_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16m1x2_t __riscv_vset(vbfloat16m1x2_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m1x3_t __riscv_vset(vbfloat16m1x3_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m1x4_t __riscv_vset(vbfloat16m1x4_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m1x5_t __riscv_vset(vbfloat16m1x5_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m1x6_t __riscv_vset(vbfloat16m1x6_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m1x7_t __riscv_vset(vbfloat16m1x7_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m1x8_t __riscv_vset(vbfloat16m1x8_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m2x2_t __riscv_vset(vbfloat16m2x2_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m2x3_t 
__riscv_vset(vbfloat16m2x3_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m2x4_t __riscv_vset(vbfloat16m2x4_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m4x2_t __riscv_vset(vbfloat16m4x2_t dest, size_t index, + vbfloat16m4_t value); +---- + +[[overloaded-vector-extraction]] +==== Vector Extraction Intrinsics + +[,c] +---- +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m2_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m4_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m8_t src, size_t index); +vbfloat16m2_t __riscv_vget_bf16m2(vbfloat16m4_t src, size_t index); +vbfloat16m2_t __riscv_vget_bf16m2(vbfloat16m8_t src, size_t index); +vbfloat16m4_t __riscv_vget_bf16m4(vbfloat16m8_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x2_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x3_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x4_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x5_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x6_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x7_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x8_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x2_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x3_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x4_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x5_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x6_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x7_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x8_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x2_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x3_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x4_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x5_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x6_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x7_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x8_t src, size_t index); +vbfloat16m2_t __riscv_vget_bf16m2(vbfloat16m2x2_t src, size_t index); +vbfloat16m2_t __riscv_vget_bf16m2(vbfloat16m2x3_t src, size_t index); +vbfloat16m2_t __riscv_vget_bf16m2(vbfloat16m2x4_t src, size_t index); +vbfloat16m4_t __riscv_vget_bf16m4(vbfloat16m4x2_t src, size_t index); +---- + +[[overloaded-vector-creation]] +==== Vector Creation Intrinsics +Intrinsics here don't have an overloaded variant. 
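+
+As an editorial illustration (not part of the auto-generated listings above),
+the sketch below strip-mines a widening multiply-accumulate over two `__bf16`
+arrays into an `f32` accumulator. It assumes a toolchain with `Zvfbfwma`
+enabled and uses the non-overloaded `__riscv_vsetvl_e16m1`,
+`__riscv_vle16_v_bf16m1`, and `__riscv_vle32_v_f32m2` helpers, since unmasked
+loads have no overloaded form.
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// acc[i] += (float)a[i] * (float)b[i], processing vl elements per iteration:
+// LMUL=1 bf16 inputs widen into an LMUL=2 f32 accumulator.
+void bf16_fwmacc(const __bf16 *a, const __bf16 *b, float *acc, size_t n) {
+  for (size_t i = 0; i < n;) {
+    size_t vl = __riscv_vsetvl_e16m1(n - i);
+    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a + i, vl);
+    vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b + i, vl);
+    vfloat32m2_t vacc = __riscv_vle32_v_f32m2(acc + i, vl);
+    vacc = __riscv_vfwmaccbf16(vacc, va, vb, vl); // overloaded widening FMA
+    __riscv_vse32(acc + i, vacc, vl);             // overloaded f32 store
+    i += vl;
+  }
+}
+----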
diff --git a/auto-generated/bfloat16/overloaded_intrinsic_funcs/00_bfloat16_vector_loads_and_stores_intrinsics.adoc b/auto-generated/bfloat16/overloaded_intrinsic_funcs/00_bfloat16_vector_loads_and_stores_intrinsics.adoc new file mode 100644 index 000000000..67cac3d50 --- /dev/null +++ b/auto-generated/bfloat16/overloaded_intrinsic_funcs/00_bfloat16_vector_loads_and_stores_intrinsics.adoc @@ -0,0 +1,202 @@ + +=== BFloat16 Vector Loads and Stores Intrinsics + +[[overloaded-bf16-vector-unit-stride-load]] +==== Vector Unit-Stride Load Intrinsics + +[,c] +---- +// masked functions +vbfloat16mf4_t __riscv_vle16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16(vbool8_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16(vbool4_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16(vbool2_t vm, const __bf16 *rs1, size_t vl); +---- + +[[overloaded-bf16-vector-unit-stride-store]] +==== Vector Unit-Stride Store Intrinsics + +[,c] +---- +void __riscv_vse16(__bf16 *rs1, vbfloat16mf4_t vs3, size_t vl); +void __riscv_vse16(__bf16 *rs1, vbfloat16mf2_t vs3, size_t vl); +void __riscv_vse16(__bf16 *rs1, vbfloat16m1_t vs3, size_t vl); +void __riscv_vse16(__bf16 *rs1, vbfloat16m2_t vs3, size_t vl); +void __riscv_vse16(__bf16 *rs1, vbfloat16m4_t vs3, size_t vl); +void __riscv_vse16(__bf16 *rs1, vbfloat16m8_t vs3, size_t vl); +// masked functions +void __riscv_vse16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4_t vs3, size_t vl); +void __riscv_vse16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2_t vs3, size_t vl); +void __riscv_vse16(vbool16_t vm, __bf16 *rs1, vbfloat16m1_t vs3, size_t vl); +void __riscv_vse16(vbool8_t vm, __bf16 *rs1, vbfloat16m2_t vs3, size_t vl); +void __riscv_vse16(vbool4_t vm, __bf16 *rs1, vbfloat16m4_t vs3, size_t vl); +void __riscv_vse16(vbool2_t vm, __bf16 *rs1, vbfloat16m8_t vs3, size_t vl); +---- + +[[overloaded-vector-strided-load]] +==== Vector Strided Load Intrinsics + +[,c] +---- +// masked functions +vbfloat16mf4_t __riscv_vlse16(vbool64_t vm, const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vlse16(vbool32_t vm, const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vlse16(vbool16_t vm, const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vlse16(vbool8_t vm, const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vlse16(vbool4_t vm, const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vlse16(vbool2_t vm, const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +---- + +[[overloaded-vector-strided-store]] +==== Vector Strided Store Intrinsics + +[,c] +---- +void __riscv_vsse16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsse16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsse16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1_t vs3, size_t vl); +void __riscv_vsse16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2_t vs3, size_t vl); +void __riscv_vsse16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4_t vs3, size_t vl); +void __riscv_vsse16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m8_t vs3, size_t vl); +// masked functions +void __riscv_vsse16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsse16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsse16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, vbfloat16m1_t vs3, 
+ size_t vl); +void __riscv_vsse16(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vsse16(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vsse16(vbool2_t vm, __bf16 *rs1, ptrdiff_t rs2, vbfloat16m8_t vs3, + size_t vl); +---- + +[[overloaded-vector-indexed-load]] +==== Vector Indexed Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vloxei16(const __bf16 *rs1, vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16(const __bf16 *rs1, vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16(const __bf16 *rs1, vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vloxei16(const __bf16 *rs1, vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vloxei16(const __bf16 *rs1, vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vloxei16(const __bf16 *rs1, vuint16m8_t rs2, size_t vl); +vbfloat16mf4_t __riscv_vluxei16(const __bf16 *rs1, vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16(const __bf16 *rs1, vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16(const __bf16 *rs1, vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vluxei16(const __bf16 *rs1, vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vluxei16(const __bf16 *rs1, vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vluxei16(const __bf16 *rs1, vuint16m8_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16(vbool16_t vm, const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16(vbool8_t vm, const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16(vbool4_t vm, const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16(vbool2_t vm, const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16(vbool16_t vm, const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16(vbool8_t vm, const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16(vbool4_t vm, const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16(vbool2_t vm, const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +---- + +[[overloaded-vector-indexed-store]] +==== Vector Indexed Store Intrinsics + +[,c] +---- +void __riscv_vsoxei16(__bf16 *rs1, vuint16mf4_t rs2, vbfloat16mf4_t vs3, + size_t vl); +void __riscv_vsoxei16(__bf16 *rs1, vuint16mf2_t rs2, vbfloat16mf2_t vs3, + size_t vl); +void __riscv_vsoxei16(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vsoxei16(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vsoxei16(__bf16 *rs1, vuint16m4_t rs2, vbfloat16m4_t vs3, + size_t vl); +void __riscv_vsoxei16(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl); +void __riscv_vsuxei16(__bf16 *rs1, vuint16mf4_t rs2, vbfloat16mf4_t vs3, + size_t vl); +void __riscv_vsuxei16(__bf16 *rs1, vuint16mf2_t rs2, vbfloat16mf2_t vs3, + size_t vl); +void __riscv_vsuxei16(__bf16 *rs1, vuint16m1_t rs2, vbfloat16m1_t vs3, + size_t vl); +void __riscv_vsuxei16(__bf16 *rs1, vuint16m2_t rs2, vbfloat16m2_t vs3, + size_t vl); +void __riscv_vsuxei16(__bf16 *rs1, vuint16m4_t rs2, 
vbfloat16m4_t vs3, + size_t vl); +void __riscv_vsuxei16(__bf16 *rs1, vuint16m8_t rs2, vbfloat16m8_t vs3, + size_t vl); +// masked functions +void __riscv_vsoxei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsoxei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsoxei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl); +void __riscv_vsoxei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl); +void __riscv_vsoxei16(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl); +void __riscv_vsoxei16(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t vl); +void __riscv_vsuxei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t rs2, + vbfloat16mf4_t vs3, size_t vl); +void __riscv_vsuxei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t rs2, + vbfloat16mf2_t vs3, size_t vl); +void __riscv_vsuxei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t rs2, + vbfloat16m1_t vs3, size_t vl); +void __riscv_vsuxei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t rs2, + vbfloat16m2_t vs3, size_t vl); +void __riscv_vsuxei16(vbool4_t vm, __bf16 *rs1, vuint16m4_t rs2, + vbfloat16m4_t vs3, size_t vl); +void __riscv_vsuxei16(vbool2_t vm, __bf16 *rs1, vuint16m8_t rs2, + vbfloat16m8_t vs3, size_t vl); +---- + +[[overloaded-unit-stride-fault-only-first-loads]] +==== Unit-stride Fault-Only-First Loads Intrinsics + +[,c] +---- +// masked functions +vbfloat16mf4_t __riscv_vle16ff(vbool64_t vm, const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff(vbool32_t vm, const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff(vbool16_t vm, const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2_t __riscv_vle16ff(vbool8_t vm, const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4_t __riscv_vle16ff(vbool4_t vm, const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m8_t __riscv_vle16ff(vbool2_t vm, const __bf16 *rs1, size_t *new_vl, + size_t vl); +---- diff --git a/auto-generated/bfloat16/overloaded_intrinsic_funcs/01_bfloat16_vector_loads_and_stores_segment_intrinsics.adoc b/auto-generated/bfloat16/overloaded_intrinsic_funcs/01_bfloat16_vector_loads_and_stores_segment_intrinsics.adoc new file mode 100644 index 000000000..06d4d0a39 --- /dev/null +++ b/auto-generated/bfloat16/overloaded_intrinsic_funcs/01_bfloat16_vector_loads_and_stores_segment_intrinsics.adoc @@ -0,0 +1,750 @@ + +=== BFloat16 Vector Loads and Stores Segment Intrinsics + +[[overloaded-vector-unit-stride-segment-load]] +==== Vector Unit-Stride Segment Load Intrinsics + +[,c] +---- +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16(vbool64_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16(vbool32_t vm, const 
__bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16(vbool32_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16(vbool16_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16(vbool8_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16(vbool8_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16(vbool8_t vm, const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16(vbool4_t vm, const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff(vbool64_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff(vbool32_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff(vbool16_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff(vbool8_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); 
+vbfloat16m4x2_t __riscv_vlseg2e16ff(vbool4_t vm, const __bf16 *rs1, + size_t *new_vl, size_t vl); +---- + +[[overloaded-vector-unit-stride-segment-store]] +==== Vector Unit-Stride Segment Store Intrinsics + +[,c] +---- +void __riscv_vsseg2e16(__bf16 *rs1, vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vsseg3e16(__bf16 *rs1, vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vsseg4e16(__bf16 *rs1, vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vsseg5e16(__bf16 *rs1, vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vsseg6e16(__bf16 *rs1, vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vsseg7e16(__bf16 *rs1, vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vsseg8e16(__bf16 *rs1, vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vsseg2e16(__bf16 *rs1, vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vsseg3e16(__bf16 *rs1, vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vsseg4e16(__bf16 *rs1, vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vsseg5e16(__bf16 *rs1, vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vsseg6e16(__bf16 *rs1, vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vsseg7e16(__bf16 *rs1, vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vsseg8e16(__bf16 *rs1, vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vsseg2e16(__bf16 *rs1, vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsseg3e16(__bf16 *rs1, vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsseg4e16(__bf16 *rs1, vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsseg5e16(__bf16 *rs1, vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsseg6e16(__bf16 *rs1, vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsseg7e16(__bf16 *rs1, vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsseg8e16(__bf16 *rs1, vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsseg2e16(__bf16 *rs1, vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsseg3e16(__bf16 *rs1, vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vsseg4e16(__bf16 *rs1, vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsseg2e16(__bf16 *rs1, vbfloat16m4x2_t vs3, size_t vl); +// masked functions +void __riscv_vsseg2e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vsseg3e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x3_t vs3, + size_t vl); +void __riscv_vsseg4e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vsseg5e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vsseg6e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vsseg7e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vsseg8e16(vbool64_t vm, __bf16 *rs1, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vsseg2e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vsseg3e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vsseg4e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vsseg5e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x5_t vs3, + size_t vl); +void __riscv_vsseg6e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vsseg7e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vsseg8e16(vbool32_t vm, __bf16 *rs1, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vsseg2e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x2_t vs3, + size_t vl); +void __riscv_vsseg3e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x3_t vs3, + size_t vl); +void __riscv_vsseg4e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x4_t vs3, + size_t vl); +void __riscv_vsseg5e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x5_t vs3, 
+ size_t vl); +void __riscv_vsseg6e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x6_t vs3, + size_t vl); +void __riscv_vsseg7e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x7_t vs3, + size_t vl); +void __riscv_vsseg8e16(vbool16_t vm, __bf16 *rs1, vbfloat16m1x8_t vs3, + size_t vl); +void __riscv_vsseg2e16(vbool8_t vm, __bf16 *rs1, vbfloat16m2x2_t vs3, + size_t vl); +void __riscv_vsseg3e16(vbool8_t vm, __bf16 *rs1, vbfloat16m2x3_t vs3, + size_t vl); +void __riscv_vsseg4e16(vbool8_t vm, __bf16 *rs1, vbfloat16m2x4_t vs3, + size_t vl); +void __riscv_vsseg2e16(vbool4_t vm, __bf16 *rs1, vbfloat16m4x2_t vs3, + size_t vl); +---- + +[[overloaded-vector-strided-segment-load]] +==== Vector Strided Segment Load Intrinsics + +[,c] +---- +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16(vbool64_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16(vbool32_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16(vbool16_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16(vbool8_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16(vbool4_t vm, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +---- + +[[overloaded-vector-strided-segment-store]] +==== Vector Strided Segment Store Intrinsics + +[,c] +---- +void __riscv_vssseg2e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vssseg3e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x3_t vs3, + size_t vl); +void __riscv_vssseg4e16(__bf16 *rs1, ptrdiff_t rs2, 
vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vssseg5e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vssseg6e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vssseg7e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vssseg8e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vssseg2e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vssseg3e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vssseg4e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vssseg5e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x5_t vs3, + size_t vl); +void __riscv_vssseg6e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vssseg7e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vssseg8e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vssseg2e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x2_t vs3, + size_t vl); +void __riscv_vssseg3e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x3_t vs3, + size_t vl); +void __riscv_vssseg4e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x4_t vs3, + size_t vl); +void __riscv_vssseg5e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x5_t vs3, + size_t vl); +void __riscv_vssseg6e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x6_t vs3, + size_t vl); +void __riscv_vssseg7e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x7_t vs3, + size_t vl); +void __riscv_vssseg8e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m1x8_t vs3, + size_t vl); +void __riscv_vssseg2e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x2_t vs3, + size_t vl); +void __riscv_vssseg3e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x3_t vs3, + size_t vl); +void __riscv_vssseg4e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m2x4_t vs3, + size_t vl); +void __riscv_vssseg2e16(__bf16 *rs1, ptrdiff_t rs2, vbfloat16m4x2_t vs3, + size_t vl); +// masked functions +void __riscv_vssseg2e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vssseg3e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vssseg4e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vssseg5e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vssseg6e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vssseg7e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vssseg8e16(vbool64_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vssseg2e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vssseg3e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vssseg4e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vssseg5e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vssseg6e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vssseg7e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vssseg8e16(vbool32_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vssseg2e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x2_t vs3, size_t vl); +void 
__riscv_vssseg3e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vssseg4e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vssseg5e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vssseg6e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vssseg7e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vssseg8e16(vbool16_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vssseg2e16(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vssseg3e16(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vssseg4e16(vbool8_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vssseg2e16(vbool4_t vm, __bf16 *rs1, ptrdiff_t rs2, + vbfloat16m4x2_t vs3, size_t vl); +---- + +[[overloaded-vector-indexed-segment-load]] +==== Vector Indexed Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vloxseg2ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t 
__riscv_vluxseg4ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16(const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16(const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16(const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16(const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16(const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16(vbool16_t 
vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16(vbool64_t vm, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16(vbool32_t vm, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16(vbool16_t vm, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16(vbool8_t vm, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); 
+vbfloat16m4x2_t __riscv_vluxseg2ei16(vbool4_t vm, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +---- + +[[overloaded-vector-indexed-segment-store]] +==== Vector Indexed Segment Store Intrinsics + +[,c] +---- +void __riscv_vsoxseg2ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vsoxseg5ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vsoxseg6ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vsoxseg7ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vsoxseg8ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vsoxseg5ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x5_t vs3, + size_t vl); +void __riscv_vsoxseg6ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vsoxseg7ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vsoxseg8ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x4_t vs3, + size_t vl); +void __riscv_vsoxseg5ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x5_t vs3, + size_t vl); +void __riscv_vsoxseg6ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x6_t vs3, + size_t vl); +void __riscv_vsoxseg7ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x7_t vs3, + size_t vl); +void __riscv_vsoxseg8ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x8_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16(__bf16 *rs1, vuint16m2_t vs2, vbfloat16m2x2_t vs3, + size_t vl); +void __riscv_vsoxseg3ei16(__bf16 *rs1, vuint16m2_t vs2, vbfloat16m2x3_t vs3, + size_t vl); +void __riscv_vsoxseg4ei16(__bf16 *rs1, vuint16m2_t vs2, vbfloat16m2x4_t vs3, + size_t vl); +void __riscv_vsoxseg2ei16(__bf16 *rs1, vuint16m4_t vs2, vbfloat16m4x2_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x4_t vs3, + size_t vl); +void __riscv_vsuxseg5ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x5_t vs3, + size_t vl); +void __riscv_vsuxseg6ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x6_t vs3, + size_t vl); +void __riscv_vsuxseg7ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x7_t vs3, + size_t vl); +void __riscv_vsuxseg8ei16(__bf16 *rs1, vuint16mf4_t vs2, vbfloat16mf4x8_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x4_t vs3, + size_t vl); +void __riscv_vsuxseg5ei16(__bf16 *rs1, vuint16mf2_t vs2, 
vbfloat16mf2x5_t vs3, + size_t vl); +void __riscv_vsuxseg6ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x6_t vs3, + size_t vl); +void __riscv_vsuxseg7ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x7_t vs3, + size_t vl); +void __riscv_vsuxseg8ei16(__bf16 *rs1, vuint16mf2_t vs2, vbfloat16mf2x8_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x4_t vs3, + size_t vl); +void __riscv_vsuxseg5ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x5_t vs3, + size_t vl); +void __riscv_vsuxseg6ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x6_t vs3, + size_t vl); +void __riscv_vsuxseg7ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x7_t vs3, + size_t vl); +void __riscv_vsuxseg8ei16(__bf16 *rs1, vuint16m1_t vs2, vbfloat16m1x8_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16(__bf16 *rs1, vuint16m2_t vs2, vbfloat16m2x2_t vs3, + size_t vl); +void __riscv_vsuxseg3ei16(__bf16 *rs1, vuint16m2_t vs2, vbfloat16m2x3_t vs3, + size_t vl); +void __riscv_vsuxseg4ei16(__bf16 *rs1, vuint16m2_t vs2, vbfloat16m2x4_t vs3, + size_t vl); +void __riscv_vsuxseg2ei16(__bf16 *rs1, vuint16m4_t vs2, vbfloat16m4x2_t vs3, + size_t vl); +// masked functions +void __riscv_vsoxseg2ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vsoxseg5ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vsoxseg6ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vsoxseg7ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vsoxseg8ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vsoxseg2ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vsoxseg5ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vsoxseg6ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vsoxseg7ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vsoxseg8ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vsoxseg2ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsoxseg5ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsoxseg6ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsoxseg7ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsoxseg8ei16(vbool16_t vm, __bf16 *rs1, 
vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsoxseg2ei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsoxseg3ei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vsoxseg4ei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsoxseg2ei16(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl); +void __riscv_vsuxseg2ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x4_t vs3, size_t vl); +void __riscv_vsuxseg5ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x5_t vs3, size_t vl); +void __riscv_vsuxseg6ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x6_t vs3, size_t vl); +void __riscv_vsuxseg7ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x7_t vs3, size_t vl); +void __riscv_vsuxseg8ei16(vbool64_t vm, __bf16 *rs1, vuint16mf4_t vs2, + vbfloat16mf4x8_t vs3, size_t vl); +void __riscv_vsuxseg2ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x4_t vs3, size_t vl); +void __riscv_vsuxseg5ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x5_t vs3, size_t vl); +void __riscv_vsuxseg6ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x6_t vs3, size_t vl); +void __riscv_vsuxseg7ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x7_t vs3, size_t vl); +void __riscv_vsuxseg8ei16(vbool32_t vm, __bf16 *rs1, vuint16mf2_t vs2, + vbfloat16mf2x8_t vs3, size_t vl); +void __riscv_vsuxseg2ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x4_t vs3, size_t vl); +void __riscv_vsuxseg5ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x5_t vs3, size_t vl); +void __riscv_vsuxseg6ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x6_t vs3, size_t vl); +void __riscv_vsuxseg7ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x7_t vs3, size_t vl); +void __riscv_vsuxseg8ei16(vbool16_t vm, __bf16 *rs1, vuint16m1_t vs2, + vbfloat16m1x8_t vs3, size_t vl); +void __riscv_vsuxseg2ei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x2_t vs3, size_t vl); +void __riscv_vsuxseg3ei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x3_t vs3, size_t vl); +void __riscv_vsuxseg4ei16(vbool8_t vm, __bf16 *rs1, vuint16m2_t vs2, + vbfloat16m2x4_t vs3, size_t vl); +void __riscv_vsuxseg2ei16(vbool4_t vm, __bf16 *rs1, vuint16m4_t vs2, + vbfloat16m4x2_t vs3, size_t vl); +---- diff --git a/auto-generated/bfloat16/overloaded_intrinsic_funcs/02_bfloat16_convert_intrinsics.adoc b/auto-generated/bfloat16/overloaded_intrinsic_funcs/02_bfloat16_convert_intrinsics.adoc new file mode 100644 index 000000000..151c6c4ec --- /dev/null +++ b/auto-generated/bfloat16/overloaded_intrinsic_funcs/02_bfloat16_convert_intrinsics.adoc @@ -0,0 +1,59 @@ + +=== BFloat16 Convert Intrinsics + 
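+As a hedged example (editorial, not generated), the round trip below uses the
+prototypes listed in the two subsections that follow: it narrows `f32` data to
+`__bf16` with an explicit rounding mode, then widens it back. It assumes a
+toolchain with `Zvfbfmin` enabled, the `__RISCV_FRM_RNE` rounding-mode
+constant, and the base non-overloaded `__riscv_vle32_v_f32m2` load.
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// dst[i] = (float)(__bf16)src[i]: the narrowing step rounds to nearest-even;
+// the widening step is exact, since every bf16 value is representable in f32.
+void f32_bf16_roundtrip(const float *src, float *dst, size_t n) {
+  for (size_t i = 0; i < n;) {
+    size_t vl = __riscv_vsetvl_e32m2(n - i);
+    vfloat32m2_t vf = __riscv_vle32_v_f32m2(src + i, vl);
+    vbfloat16m1_t vb = __riscv_vfncvtbf16_f(vf, __RISCV_FRM_RNE, vl);
+    vfloat32m2_t vw = __riscv_vfwcvtbf16_f(vb, vl);
+    __riscv_vse32(dst + i, vw, vl);
+    i += vl;
+  }
+}
+----
+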
+[[overloaded-bf16-vector-narrow-convert]] +==== Vector Narrowing Convert Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vfncvtbf16_f(vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f(vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f(vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f(vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f(vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f(vbool64_t vm, vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f(vbool32_t vm, vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f(vbool16_t vm, vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f(vbool8_t vm, vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f(vbool4_t vm, vfloat32m8_t vs2, size_t vl); +vbfloat16mf4_t __riscv_vfncvtbf16_f(vfloat32mf2_t vs2, unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f(vfloat32m1_t vs2, unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f(vfloat32m2_t vs2, unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f(vfloat32m4_t vs2, unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f(vfloat32m8_t vs2, unsigned int frm, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f(vbool64_t vm, vfloat32mf2_t vs2, + unsigned int frm, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f(vbool32_t vm, vfloat32m1_t vs2, + unsigned int frm, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f(vbool16_t vm, vfloat32m2_t vs2, + unsigned int frm, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f(vbool8_t vm, vfloat32m4_t vs2, + unsigned int frm, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f(vbool4_t vm, vfloat32m8_t vs2, + unsigned int frm, size_t vl); +---- + +[[overloaded-bf16-vector-widening-convert]] +==== Vector Widening Convert Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwcvtbf16_f(vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f(vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f(vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f(vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f(vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f(vbool64_t vm, vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f(vbool32_t vm, vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f(vbool16_t vm, vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f(vbool8_t vm, vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f(vbool4_t vm, vbfloat16m4_t vs2, size_t vl); +---- diff --git a/auto-generated/bfloat16/overloaded_intrinsic_funcs/03_bfloat16_arithmetic_intrinsics.adoc b/auto-generated/bfloat16/overloaded_intrinsic_funcs/03_bfloat16_arithmetic_intrinsics.adoc new file mode 100644 index 000000000..01a26a747 --- /dev/null +++ b/auto-generated/bfloat16/overloaded_intrinsic_funcs/03_bfloat16_arithmetic_intrinsics.adoc @@ -0,0 +1,145 @@ + +=== BFloat16 Arithmetic Intrinsics + +[[overloaded-bf16-widening-multiply-accumulate]] +==== Vector Widening Multiply-Accumulate Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwmaccbf16(vfloat32mf2_t vd, vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t 
__riscv_vfwmaccbf16(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vfloat32m2_t vd, __bf16 vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vfloat32m4_t vd, __bf16 vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vfloat32m8_t vd, __bf16 vs1, vbfloat16m4_t vs2, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16(vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16(vfloat32mf2_t vd, vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, unsigned int frm, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, unsigned int frm, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vfloat32m2_t vd, __bf16 vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vfloat32m4_t vd, __bf16 vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vfloat32m8_t vd, __bf16 vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16(vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, unsigned int frm, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16(vbool32_t vm, vfloat32m1_t vd, 
__bf16 vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +---- + +[[overloaded-vector-bf16-move]] +==== Vector BFloat16 Move Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmv_v(vbfloat16mf4_t vs1, size_t vl); +vbfloat16mf2_t __riscv_vmv_v(vbfloat16mf2_t vs1, size_t vl); +vbfloat16m1_t __riscv_vmv_v(vbfloat16m1_t vs1, size_t vl); +vbfloat16m2_t __riscv_vmv_v(vbfloat16m2_t vs1, size_t vl); +vbfloat16m4_t __riscv_vmv_v(vbfloat16m4_t vs1, size_t vl); +vbfloat16m8_t __riscv_vmv_v(vbfloat16m8_t vs1, size_t vl); +---- + +[[overloaded-vector-bf16-merge]] +==== Vector BFloat16 Merge Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmerge(vbfloat16mf4_t vs2, vbfloat16mf4_t vs1, + vbool64_t v0, size_t vl); +vbfloat16mf2_t __riscv_vmerge(vbfloat16mf2_t vs2, vbfloat16mf2_t vs1, + vbool32_t v0, size_t vl); +vbfloat16m1_t __riscv_vmerge(vbfloat16m1_t vs2, vbfloat16m1_t vs1, vbool16_t v0, + size_t vl); +vbfloat16m2_t __riscv_vmerge(vbfloat16m2_t vs2, vbfloat16m2_t vs1, vbool8_t v0, + size_t vl); +vbfloat16m4_t __riscv_vmerge(vbfloat16m4_t vs2, vbfloat16m4_t vs1, vbool4_t v0, + size_t vl); +vbfloat16m8_t __riscv_vmerge(vbfloat16m8_t vs2, vbfloat16m8_t vs1, vbool2_t v0, + size_t vl); +---- diff --git a/auto-generated/bfloat16/overloaded_intrinsic_funcs/04_bfloat16_miscellaneous_vector_utility_intrinsics.adoc b/auto-generated/bfloat16/overloaded_intrinsic_funcs/04_bfloat16_miscellaneous_vector_utility_intrinsics.adoc new file mode 100644 index 000000000..70ab53219 --- /dev/null +++ b/auto-generated/bfloat16/overloaded_intrinsic_funcs/04_bfloat16_miscellaneous_vector_utility_intrinsics.adoc @@ -0,0 +1,193 @@ + +=== BFloat16 Miscellaneous Vector Utility Intrinsics + +[[overloaded-reinterpret-cast-conversion]] +==== Reinterpret Cast Conversion Intrinsics + +[,c] +---- +// Reinterpret between different type under the same SEW/LMUL +vbfloat16mf4_t __riscv_vreinterpret_bf16mf4(vint16mf4_t src); +vbfloat16mf2_t __riscv_vreinterpret_bf16mf2(vint16mf2_t src); +vbfloat16m1_t __riscv_vreinterpret_bf16m1(vint16m1_t src); +vbfloat16m2_t __riscv_vreinterpret_bf16m2(vint16m2_t src); +vbfloat16m4_t __riscv_vreinterpret_bf16m4(vint16m4_t src); +vbfloat16m8_t __riscv_vreinterpret_bf16m8(vint16m8_t src); +vbfloat16mf4_t __riscv_vreinterpret_bf16mf4(vuint16mf4_t src); +vbfloat16mf2_t __riscv_vreinterpret_bf16mf2(vuint16mf2_t src); +vbfloat16m1_t __riscv_vreinterpret_bf16m1(vuint16m1_t src); +vbfloat16m2_t __riscv_vreinterpret_bf16m2(vuint16m2_t src); +vbfloat16m4_t __riscv_vreinterpret_bf16m4(vuint16m4_t src); +vbfloat16m8_t __riscv_vreinterpret_bf16m8(vuint16m8_t src); +vint16mf4_t __riscv_vreinterpret_i16mf4(vbfloat16mf4_t src); +vint16mf2_t __riscv_vreinterpret_i16mf2(vbfloat16mf2_t src); +vint16m1_t 
__riscv_vreinterpret_i16m1(vbfloat16m1_t src); +vint16m2_t __riscv_vreinterpret_i16m2(vbfloat16m2_t src); +vint16m4_t __riscv_vreinterpret_i16m4(vbfloat16m4_t src); +vint16m8_t __riscv_vreinterpret_i16m8(vbfloat16m8_t src); +vuint16mf4_t __riscv_vreinterpret_u16mf4(vbfloat16mf4_t src); +vuint16mf2_t __riscv_vreinterpret_u16mf2(vbfloat16mf2_t src); +vuint16m1_t __riscv_vreinterpret_u16m1(vbfloat16m1_t src); +vuint16m2_t __riscv_vreinterpret_u16m2(vbfloat16m2_t src); +vuint16m4_t __riscv_vreinterpret_u16m4(vbfloat16m4_t src); +vuint16m8_t __riscv_vreinterpret_u16m8(vbfloat16m8_t src); +---- + +[[overloaded-vector-lmul-extensionn]] +==== Vector LMUL Extension Intrinsics + +[,c] +---- +vbfloat16mf2_t __riscv_vlmul_ext_bf16mf2(vbfloat16mf4_t value); +vbfloat16m1_t __riscv_vlmul_ext_bf16m1(vbfloat16mf4_t value); +vbfloat16m2_t __riscv_vlmul_ext_bf16m2(vbfloat16mf4_t value); +vbfloat16m4_t __riscv_vlmul_ext_bf16m4(vbfloat16mf4_t value); +vbfloat16m8_t __riscv_vlmul_ext_bf16m8(vbfloat16mf4_t value); +vbfloat16m1_t __riscv_vlmul_ext_bf16m1(vbfloat16mf2_t value); +vbfloat16m2_t __riscv_vlmul_ext_bf16m2(vbfloat16mf2_t value); +vbfloat16m4_t __riscv_vlmul_ext_bf16m4(vbfloat16mf2_t value); +vbfloat16m8_t __riscv_vlmul_ext_bf16m8(vbfloat16mf2_t value); +vbfloat16m2_t __riscv_vlmul_ext_bf16m2(vbfloat16m1_t value); +vbfloat16m4_t __riscv_vlmul_ext_bf16m4(vbfloat16m1_t value); +vbfloat16m8_t __riscv_vlmul_ext_bf16m8(vbfloat16m1_t value); +vbfloat16m4_t __riscv_vlmul_ext_bf16m4(vbfloat16m2_t value); +vbfloat16m8_t __riscv_vlmul_ext_bf16m8(vbfloat16m2_t value); +vbfloat16m8_t __riscv_vlmul_ext_bf16m8(vbfloat16m4_t value); +---- + +[[overloaded-vector-lmul-truncation]] +==== Vector LMUL Truncation Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vlmul_trunc_bf16mf4(vbfloat16mf2_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_bf16mf4(vbfloat16m1_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_bf16mf2(vbfloat16m1_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_bf16mf4(vbfloat16m2_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_bf16mf2(vbfloat16m2_t value); +vbfloat16m1_t __riscv_vlmul_trunc_bf16m1(vbfloat16m2_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_bf16mf4(vbfloat16m4_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_bf16mf2(vbfloat16m4_t value); +vbfloat16m1_t __riscv_vlmul_trunc_bf16m1(vbfloat16m4_t value); +vbfloat16m2_t __riscv_vlmul_trunc_bf16m2(vbfloat16m4_t value); +vbfloat16mf4_t __riscv_vlmul_trunc_bf16mf4(vbfloat16m8_t value); +vbfloat16mf2_t __riscv_vlmul_trunc_bf16mf2(vbfloat16m8_t value); +vbfloat16m1_t __riscv_vlmul_trunc_bf16m1(vbfloat16m8_t value); +vbfloat16m2_t __riscv_vlmul_trunc_bf16m2(vbfloat16m8_t value); +vbfloat16m4_t __riscv_vlmul_trunc_bf16m4(vbfloat16m8_t value); +---- + +[[overloaded-vector-initialization]] +==== Vector Initialization Intrinsics +Intrinsics here don't have an overloaded variant. 
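+
+Since the reinterpret casts above only relabel register contents, they are
+the natural way to do bit manipulation on bf16 data; a minimal sketch
+(assuming the overloaded `__riscv_vand` from the base integer intrinsics):
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Absolute value of bf16 elements by clearing the sign bit in a u16 view.
+vbfloat16m1_t bf16_abs(vbfloat16m1_t v, size_t vl) {
+  vuint16m1_t bits = __riscv_vreinterpret_u16m1(v);  // same SEW/LMUL view
+  bits = __riscv_vand(bits, 0x7fff, vl);             // clear bit 15 (sign)
+  return __riscv_vreinterpret_bf16m1(bits);          // back to bf16
+}
+
+// LMUL extension/truncation likewise move no data; they only change the
+// register-group view, e.g. keeping the first half of an m1 group:
+vbfloat16mf2_t low_half(vbfloat16m1_t v) {
+  return __riscv_vlmul_trunc_bf16mf2(v);
+}
+----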
+ +[[overloaded-vector-insertion]] +==== Vector Insertion Intrinsics + +[,c] +---- +vbfloat16m2_t __riscv_vset(vbfloat16m2_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m4_t __riscv_vset(vbfloat16m4_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m4_t __riscv_vset(vbfloat16m4_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m8_t __riscv_vset(vbfloat16m8_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m8_t __riscv_vset(vbfloat16m8_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m8_t __riscv_vset(vbfloat16m8_t dest, size_t index, + vbfloat16m4_t value); +vbfloat16mf4x2_t __riscv_vset(vbfloat16mf4x2_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x3_t __riscv_vset(vbfloat16mf4x3_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x4_t __riscv_vset(vbfloat16mf4x4_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x5_t __riscv_vset(vbfloat16mf4x5_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x6_t __riscv_vset(vbfloat16mf4x6_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x7_t __riscv_vset(vbfloat16mf4x7_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf4x8_t __riscv_vset(vbfloat16mf4x8_t dest, size_t index, + vbfloat16mf4_t value); +vbfloat16mf2x2_t __riscv_vset(vbfloat16mf2x2_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x3_t __riscv_vset(vbfloat16mf2x3_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x4_t __riscv_vset(vbfloat16mf2x4_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x5_t __riscv_vset(vbfloat16mf2x5_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x6_t __riscv_vset(vbfloat16mf2x6_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x7_t __riscv_vset(vbfloat16mf2x7_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16mf2x8_t __riscv_vset(vbfloat16mf2x8_t dest, size_t index, + vbfloat16mf2_t value); +vbfloat16m1x2_t __riscv_vset(vbfloat16m1x2_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m1x3_t __riscv_vset(vbfloat16m1x3_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m1x4_t __riscv_vset(vbfloat16m1x4_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m1x5_t __riscv_vset(vbfloat16m1x5_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m1x6_t __riscv_vset(vbfloat16m1x6_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m1x7_t __riscv_vset(vbfloat16m1x7_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m1x8_t __riscv_vset(vbfloat16m1x8_t dest, size_t index, + vbfloat16m1_t value); +vbfloat16m2x2_t __riscv_vset(vbfloat16m2x2_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m2x3_t __riscv_vset(vbfloat16m2x3_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m2x4_t __riscv_vset(vbfloat16m2x4_t dest, size_t index, + vbfloat16m2_t value); +vbfloat16m4x2_t __riscv_vset(vbfloat16m4x2_t dest, size_t index, + vbfloat16m4_t value); +---- + +[[overloaded-vector-extraction]] +==== Vector Extraction Intrinsics + +[,c] +---- +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m2_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m4_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m8_t src, size_t index); +vbfloat16m2_t __riscv_vget_bf16m2(vbfloat16m4_t src, size_t index); +vbfloat16m2_t __riscv_vget_bf16m2(vbfloat16m8_t src, size_t index); +vbfloat16m4_t __riscv_vget_bf16m4(vbfloat16m8_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x2_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x3_t src, size_t index); 
+vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x4_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x5_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x6_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x7_t src, size_t index); +vbfloat16mf4_t __riscv_vget_bf16mf4(vbfloat16mf4x8_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x2_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x3_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x4_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x5_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x6_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x7_t src, size_t index); +vbfloat16mf2_t __riscv_vget_bf16mf2(vbfloat16mf2x8_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x2_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x3_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x4_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x5_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x6_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x7_t src, size_t index); +vbfloat16m1_t __riscv_vget_bf16m1(vbfloat16m1x8_t src, size_t index); +vbfloat16m2_t __riscv_vget_bf16m2(vbfloat16m2x2_t src, size_t index); +vbfloat16m2_t __riscv_vget_bf16m2(vbfloat16m2x3_t src, size_t index); +vbfloat16m2_t __riscv_vget_bf16m2(vbfloat16m2x4_t src, size_t index); +vbfloat16m4_t __riscv_vget_bf16m4(vbfloat16m4x2_t src, size_t index); +---- + +[[overloaded-vector-creation]] +==== Vector Creation Intrinsics +Intrinsics here don't have an overloaded variant. 
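+
+A minimal sketch combining the tuple-type utilities above (the `vcreate`
+name is assumed here from the non-overloaded naming convention of the
+Vector Creation section; note that `vset`/`vget` indices must be
+compile-time constants):
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Build a two-field segment tuple, overwrite one field, read one back.
+vbfloat16m1_t tuple_roundtrip(vbfloat16m1_t a, vbfloat16m1_t b) {
+  vbfloat16m1x2_t t = __riscv_vcreate_v_bf16m1x2(a, b);  // non-overloaded
+  t = __riscv_vset(t, 1, a);         // replace field 1 with a
+  return __riscv_vget_bf16m1(t, 0);  // extract field 0
+}
+----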
diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vfncvtbf16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vfncvtbf16.c new file mode 100644 index 000000000..c408c7a42 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vfncvtbf16.c @@ -0,0 +1,243 @@ +#include +#include + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_tu(vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_tu(vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_tu(vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_tu(vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_tu(vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_tum(vm, vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_tum(vm, vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_tum(vm, vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_tum(vm, vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_tum(vm, vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_tumu(vm, vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_tumu(vm, vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_tumu(vm, vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_mu(vm, vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_mu(vm, vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_mu(vm, vd, vs2, vl); +} + +vbfloat16m2_t 
test_vfncvtbf16_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_mu(vm, vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_mu(vm, vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tum(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tumu(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_tumu(vbool8_t vm, + vbfloat16m2_t vd, + vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_tumu(vbool4_t vm, + vbfloat16m4_t vd, + vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_mu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return 
__riscv_vfncvtbf16_f_f_w_bf16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_mu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vfwcvtbf16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vfwcvtbf16.c new file mode 100644 index 000000000..9ce24c6ba --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vfwcvtbf16.c @@ -0,0 +1,102 @@ +#include +#include + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32mf2_tu(vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tu(vfloat32m1_t vd, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m1_tu(vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m2_tu(vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m4_tu(vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m8_tu(vd, vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32mf2_tum(vm, vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m1_tum(vm, vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m2_tum(vm, vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m4_tum(vm, vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m8_tum(vm, vd, vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32mf2_tumu(vm, vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m1_tumu(vm, vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m2_tumu(vm, vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) 
{ + return __riscv_vfwcvtbf16_f_f_v_f32m4_tumu(vm, vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m8_tumu(vm, vd, vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32mf2_mu(vm, vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m1_mu(vm, vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m2_mu(vm, vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m4_mu(vm, vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vfwmaccbf16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vfwmaccbf16.c new file mode 100644 index 000000000..bc8c20900 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vfwmaccbf16.c @@ -0,0 +1,496 @@ +#include +#include + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_tu(vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_tu(vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_tu(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_tu(vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_tu(vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_tu(vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_tu(vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_tu(vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_tu(vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_tu(vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_tu(vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return 
__riscv_vfwmaccbf16_vf_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) 
{ + return __riscv_vfwmaccbf16_vf_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfwmaccbf16_vf_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return 
__riscv_vfwmaccbf16_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return 
__riscv_vfwmaccbf16_vf_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vle16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vle16.c new file mode 100644 index 000000000..d147cdfec --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vle16.c @@ -0,0 +1,122 @@ +#include +#include + +vbfloat16mf4_t test_vle16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16mf4_tu(vd, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16mf2_tu(vd, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16m1_tu(vd, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16m2_tu(vd, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16m4_tu(vd, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16m8_tu(vd, rs1, vl); +} + +vbfloat16mf4_t test_vle16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf4_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf2_tum(vm, vd, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m1_tum(vm, vd, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m2_tum(vm, vd, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m4_tum(vm, vd, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m8_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4_t test_vle16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf4_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf2_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m1_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m2_tumu(vm, vd, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m4_tumu(vm, vd, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m8_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4_t test_vle16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf4_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf2_mu(vm, vd, 
rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m1_mu(vm, vd, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m2_mu(vm, vd, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m4_mu(vm, vd, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m8_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vle16ff.c b/auto-generated/bfloat16/policy_funcs/api-testing/vle16ff.c new file mode 100644 index 000000000..200105f94 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vle16ff.c @@ -0,0 +1,140 @@ +#include +#include + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16mf4_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16mf2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16m1_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16m2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16m4_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16m8_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf4_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m1_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m4_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m8_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf4_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf2_tumu(vm, vd, rs1, 
new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m1_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m2_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m4_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m8_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf4_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf2_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m1_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m2_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m4_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m8_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vloxei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vloxei16.c new file mode 100644 index 000000000..75ab2d987 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vloxei16.c @@ -0,0 +1,140 @@ +#include +#include + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16mf4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16mf2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16m1_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16m2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16m4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16m8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_tum(vbool32_t 
vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m1_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m1_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m1_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m8_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg2ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg2ei16.c new file mode 100644 index 000000000..0ed314d13 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg2ei16.c @@ -0,0 +1,139 @@ 
+#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf4x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf2x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m1x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m2x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m4x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf4x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf2x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m1x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m2x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m4x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf4x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf2x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m1x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m2x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m4x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf4x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf2x2_mu(vm, vd, rs1,
rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m1x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m2x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m4x2_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg3ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg3ei16.c new file mode 100644 index 000000000..7939b8fb1 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg3ei16.c @@ -0,0 +1,113 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf4x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf2x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m1x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m2x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf4x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf2x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m1x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m2x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf4x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf2x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m1x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m2x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16
*rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf4x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf2x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m1x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m2x3_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg4ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg4ei16.c new file mode 100644 index 000000000..d0b103679 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg4ei16.c @@ -0,0 +1,113 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf4x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf2x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m1x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m2x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf4x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf2x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m1x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m2x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf4x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf2x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m1x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m2x4_tumu(vm, vd, rs1, rs2, vl); +}
+ +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf4x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf2x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m1x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m2x4_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg5ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg5ei16.c new file mode 100644 index 000000000..3c915d4d9 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg5ei16.c @@ -0,0 +1,87 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf4x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf2x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16m1x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf4x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf2x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16m1x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf4x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf2x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16m1x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf4x5_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf2x5_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, +
vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16m1x5_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg6ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg6ei16.c new file mode 100644 index 000000000..55ab43069 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg6ei16.c @@ -0,0 +1,87 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf4x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf2x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16m1x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf4x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf2x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16m1x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf4x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf2x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16m1x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf4x6_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf2x6_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16m1x6_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg7ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg7ei16.c new file mode 100644 index 000000000..c430c1e47 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg7ei16.c @@ -0,0 +1,87 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf4x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t
test_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf2x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16m1x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf4x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf2x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16m1x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf4x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf2x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16m1x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf4x7_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf2x7_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16m1x7_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg8ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg8ei16.c new file mode 100644 index 000000000..564807d33 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vloxseg8ei16.c @@ -0,0 +1,87 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf4x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf2x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16m1x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf4x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2,
+ size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf2x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16m1x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf4x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf2x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16m1x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf4x8_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf2x8_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16m1x8_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlse16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlse16.c new file mode 100644 index 000000000..ece6f8cae --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlse16.c @@ -0,0 +1,140 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vlse16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16mf4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16mf2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16m1_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16m2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16m4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16m8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m1_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16
*rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m1_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m1_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m8_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg2e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg2e16.c new file mode 100644 index 000000000..f2fbf24f5 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg2e16.c @@ -0,0 +1,108 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf4x2_tu(vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf2x2_tu(vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m1x2_tu(vd, rs1, vl); +} + +vbfloat16m2x2_t
test_vlseg2e16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m2x2_tu(vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m4x2_tu(vd, rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf4x2_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf2x2_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m1x2_tum(vm, vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m2x2_tum(vm, vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m4x2_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf4x2_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf2x2_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m1x2_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m2x2_tumu(vm, vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m4x2_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf4x2_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf2x2_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m1x2_mu(vm, vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m2x2_mu(vm, vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m4x2_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg2e16ff.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg2e16ff.c new file mode 100644 index 000000000..da7df9f7f --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg2e16ff.c @@ -0,0 +1,132 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf4x2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t
test_vlseg2e16ff_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf2x2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m1x2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m2x2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m4x2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf4x2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf2x2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m1x2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m2x2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m4x2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf4x2_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf2x2_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m1x2_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m2x2_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m4x2_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf4x2_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf2x2_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m1x2_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t 
test_vlseg2e16ff_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m2x2_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m4x2_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg3e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg3e16.c new file mode 100644 index 000000000..550192ec0 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg3e16.c @@ -0,0 +1,88 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf4x3_tu(vd, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf2x3_tu(vd, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m1x3_tu(vd, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m2x3_tu(vd, rs1, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf4x3_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf2x3_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m1x3_tum(vm, vd, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m2x3_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf4x3_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf2x3_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m1x3_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m2x3_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf4x3_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf2x3_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m1x3_mu(vm, vd, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m2x3_mu(vm, vd, rs1, vl); +} diff --git
a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg3e16ff.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg3e16ff.c new file mode 100644 index 000000000..6be408016 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg3e16ff.c @@ -0,0 +1,107 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf4x3_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf2x3_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m1x3_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m2x3_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf4x3_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf2x3_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m1x3_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m2x3_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf4x3_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf2x3_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m1x3_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m2x3_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf4x3_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf2x3_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m1x3_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_v_bf16m2x3_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg4e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg4e16.c new file mode 100644 index 000000000..ba875d221 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg4e16.c @@ -0,0 +1,88 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf4x4_tu(vd, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf2x4_tu(vd, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m1x4_tu(vd, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m2x4_tu(vd, rs1, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf4x4_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf2x4_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m1x4_tum(vm, vd, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m2x4_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf4x4_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf2x4_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m1x4_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m2x4_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf4x4_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf2x4_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m1x4_mu(vm, vd, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m2x4_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg4e16ff.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg4e16ff.c new file mode 100644 index 000000000..792e1be03 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg4e16ff.c @@ -0,0 +1,107 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x4_t
test_vlseg4e16ff_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf4x4_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf2x4_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m1x4_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m2x4_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf4x4_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf2x4_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m1x4_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m2x4_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf4x4_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf2x4_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m1x4_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m2x4_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf4x4_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf2x4_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m1x4_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m2x4_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg5e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg5e16.c new file mode 100644 index 000000000..37a4cdad6 --- /dev/null +++ 
b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg5e16.c @@ -0,0 +1,68 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf4x5_tu(vd, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf2x5_tu(vd, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16m1x5_tu(vd, rs1, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf4x5_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf2x5_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16m1x5_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf4x5_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf2x5_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16m1x5_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf4x5_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf2x5_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16m1x5_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg5e16ff.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg5e16ff.c new file mode 100644 index 000000000..04d061397 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg5e16ff.c @@ -0,0 +1,82 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf4x5_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf2x5_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16m1x5_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf4x5_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf2x5_tum(vm, vd, rs1, new_vl,
vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16m1x5_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf4x5_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf2x5_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16m1x5_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf4x5_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf2x5_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16m1x5_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg6e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg6e16.c new file mode 100644 index 000000000..143635f11 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg6e16.c @@ -0,0 +1,68 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf4x6_tu(vd, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf2x6_tu(vd, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16m1x6_tu(vd, rs1, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf4x6_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf2x6_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16m1x6_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf4x6_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf2x6_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16m1x6_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl) { + return
__riscv_vlseg6e16_v_bf16mf4x6_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf2x6_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16m1x6_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg6e16ff.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg6e16ff.c new file mode 100644 index 000000000..722c767fe --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg6e16ff.c @@ -0,0 +1,82 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf4x6_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf2x6_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16m1x6_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf4x6_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf2x6_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16m1x6_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf4x6_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf2x6_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16m1x6_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf4x6_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf2x6_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16m1x6_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg7e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg7e16.c new file mode 100644 index 000000000..cfc5711dd --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg7e16.c @@ -0,0 +1,68 @@ +#include <riscv_vector.h> +#include <stdint.h> + 
+vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf4x7_tu(vd, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf2x7_tu(vd, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16m1x7_tu(vd, rs1, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf4x7_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf2x7_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16m1x7_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf4x7_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf2x7_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16m1x7_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf4x7_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf2x7_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16m1x7_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg7e16ff.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg7e16ff.c new file mode 100644 index 000000000..d53541c21 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg7e16ff.c @@ -0,0 +1,82 @@ +#include +#include + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf4x7_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf2x7_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16m1x7_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf4x7_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf2x7_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + 
const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16m1x7_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf4x7_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf2x7_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16m1x7_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf4x7_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf2x7_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16m1x7_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg8e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg8e16.c new file mode 100644 index 000000000..3294997eb --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg8e16.c @@ -0,0 +1,68 @@ +#include +#include + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf4x8_tu(vd, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf2x8_tu(vd, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16m1x8_tu(vd, rs1, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf4x8_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf2x8_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16m1x8_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf4x8_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf2x8_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16m1x8_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf4x8_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_mu(vbool32_t 
vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf2x8_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16m1x8_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlseg8e16ff.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg8e16ff.c new file mode 100644 index 000000000..029dd6297 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlseg8e16ff.c @@ -0,0 +1,82 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf4x8_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf2x8_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16m1x8_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf4x8_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf2x8_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16m1x8_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf4x8_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf2x8_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16m1x8_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf4x8_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf2x8_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16m1x8_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg2e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg2e16.c new file mode 100644 index 000000000..e15d577ae --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg2e16.c @@ -0,0 +1,129 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + 
return __riscv_vlsseg2e16_v_bf16mf4x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf2x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m1x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m2x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m4x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf4x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf2x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m1x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m2x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m4x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf4x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf2x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16m1x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16m2x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16m4x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf4x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf2x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m1x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_mu(vbool8_t vm, 
vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m2x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m4x2_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg3e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg3e16.c new file mode 100644 index 000000000..65cbc96f4 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg3e16.c @@ -0,0 +1,105 @@ +#include +#include + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf4x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf2x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m1x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m2x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf4x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf2x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m1x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m2x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf4x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf2x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16m1x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16m2x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf4x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf2x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, 
+ const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m1x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m2x3_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg4e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg4e16.c new file mode 100644 index 000000000..7721cf1d2 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg4e16.c @@ -0,0 +1,105 @@ +#include +#include + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf4x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf2x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m1x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m2x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf4x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf2x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m1x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m2x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf4x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf2x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16m1x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16m2x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf4x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf2x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, 
ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m1x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m2x4_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg5e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg5e16.c new file mode 100644 index 000000000..d6df0b2bd --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg5e16.c @@ -0,0 +1,81 @@ +#include +#include + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf4x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf2x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_v_bf16m1x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf4x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf2x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_v_bf16m1x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf4x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf2x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16m1x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf4x5_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf2x5_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_v_bf16m1x5_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg6e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg6e16.c new file mode 100644 index 000000000..27c1cbd88 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg6e16.c @@ -0,0 +1,81 @@ +#include +#include + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf4x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t 
test_vlsseg6e16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf2x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_v_bf16m1x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf4x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf2x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_v_bf16m1x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf4x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf2x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16m1x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf4x6_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf2x6_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_v_bf16m1x6_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg7e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg7e16.c new file mode 100644 index 000000000..872b2f0d0 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg7e16.c @@ -0,0 +1,81 @@ +#include +#include + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf4x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf2x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_v_bf16m1x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf4x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf2x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t 
test_vlsseg7e16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_v_bf16m1x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf4x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf2x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16m1x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf4x7_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf2x7_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_v_bf16m1x7_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg8e16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg8e16.c new file mode 100644 index 000000000..cee5491c5 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vlsseg8e16.c @@ -0,0 +1,81 @@ +#include +#include + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf4x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf2x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_v_bf16m1x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf4x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf2x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_v_bf16m1x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf4x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf2x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16m1x8_tumu(vm, vd, rs1, rs2, vl); 
+} + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf4x8_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf2x8_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_v_bf16m1x8_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vluxei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vluxei16.c new file mode 100644 index 000000000..2b61e3f6d --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vluxei16.c @@ -0,0 +1,140 @@ +#include +#include + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16mf4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16mf2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m1_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m1_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t 
test_vluxei16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m1_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m1_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m8_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg2ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg2ei16.c new file mode 100644 index 000000000..4c4852bf6 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg2ei16.c @@ -0,0 +1,139 @@ +#include +#include + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf4x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf2x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m1x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m2x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m4x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf4x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + 
size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf2x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m1x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m2x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m4x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf4x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf2x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m1x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m2x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m4x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf4x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf2x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m1x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m2x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m4x2_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg3ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg3ei16.c new file mode 100644 index 000000000..2ddb3a2ff --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg3ei16.c @@ -0,0 +1,113 @@ +#include +#include + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf4x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf2x3_tu(vd, rs1, rs2, vl); 
+} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m1x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m2x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf4x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf2x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m1x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m2x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf4x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf2x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m1x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m2x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf4x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf2x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m1x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m2x3_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg4ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg4ei16.c new file mode 100644 index 000000000..c26f49f3d --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg4ei16.c @@ -0,0 +1,113 @@ +#include +#include + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf4x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const 
__bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf2x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m1x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m2x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf4x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf2x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m1x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m2x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf4x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf2x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m1x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m2x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf4x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf2x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m1x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m2x4_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg5ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg5ei16.c new file mode 100644 index 000000000..10e15cfcf --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg5ei16.c @@ -0,0 +1,87 @@ +#include +#include + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return 
__riscv_vluxseg5ei16_v_bf16mf4x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf2x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16m1x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf4x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf2x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16m1x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf4x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf2x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16m1x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf4x5_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf2x5_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16m1x5_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg6ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg6ei16.c new file mode 100644 index 000000000..618ec0ca1 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg6ei16.c @@ -0,0 +1,87 @@ +#include +#include + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf4x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf2x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16m1x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf4x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t 
test_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf2x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16m1x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf4x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf2x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16m1x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf4x6_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf2x6_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16m1x6_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg7ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg7ei16.c new file mode 100644 index 000000000..aca74804f --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg7ei16.c @@ -0,0 +1,87 @@ +#include +#include + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf4x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf2x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16m1x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf4x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf2x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16m1x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf4x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, + 
vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf2x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16m1x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf4x7_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf2x7_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16m1x7_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg8ei16.c b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg8ei16.c new file mode 100644 index 000000000..9c7f8a09e --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vluxseg8ei16.c @@ -0,0 +1,87 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf4x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf2x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16m1x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf4x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf2x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16m1x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf4x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf2x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16m1x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf4x8_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t 
vl) { + return __riscv_vluxseg8ei16_v_bf16mf2x8_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16m1x8_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vmerge.c b/auto-generated/bfloat16/policy_funcs/api-testing/vmerge.c new file mode 100644 index 000000000..9e28ee542 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vmerge.c @@ -0,0 +1,38 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vmerge_vvm_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, vbool64_t v0, + size_t vl) { + return __riscv_vmerge_vvm_bf16mf4_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16mf2_t test_vmerge_vvm_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, vbool32_t v0, + size_t vl) { + return __riscv_vmerge_vvm_bf16mf2_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16m1_t test_vmerge_vvm_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, vbool16_t v0, + size_t vl) { + return __riscv_vmerge_vvm_bf16m1_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16m2_t test_vmerge_vvm_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, vbool8_t v0, + size_t vl) { + return __riscv_vmerge_vvm_bf16m2_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16m4_t test_vmerge_vvm_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, vbool4_t v0, + size_t vl) { + return __riscv_vmerge_vvm_bf16m4_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16m8_t test_vmerge_vvm_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2, + vbfloat16m8_t vs1, vbool2_t v0, + size_t vl) { + return __riscv_vmerge_vvm_bf16m8_tu(vd, vs2, vs1, v0, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/api-testing/vmv.c b/auto-generated/bfloat16/policy_funcs/api-testing/vmv.c new file mode 100644 index 000000000..c1bb53556 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/api-testing/vmv.c @@ -0,0 +1,32 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vmv_v_v_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vmv_v_v_bf16mf4_tu(vd, vs1, vl); +} + +vbfloat16mf2_t test_vmv_v_v_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vmv_v_v_bf16mf2_tu(vd, vs1, vl); +} + +vbfloat16m1_t test_vmv_v_v_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vmv_v_v_bf16m1_tu(vd, vs1, vl); +} + +vbfloat16m2_t test_vmv_v_v_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vmv_v_v_bf16m2_tu(vd, vs1, vl); +} + +vbfloat16m4_t test_vmv_v_v_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vmv_v_v_bf16m4_tu(vd, vs1, vl); +} + +vbfloat16m8_t test_vmv_v_v_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, + size_t vl) { + return __riscv_vmv_v_v_bf16m8_tu(vd, vs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/intrinsic_funcs.adoc b/auto-generated/bfloat16/policy_funcs/intrinsic_funcs.adoc new file mode 100644 index 000000000..15ebdc590 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/intrinsic_funcs.adoc @@ -0,0 +1,2932 @@ + +=== BFloat16 Vector Loads and Stores Intrinsics + +[[policy-variant-bf16-vector-unit-stride-load]] +==== Vector Unit-Stride Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vle16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2_t __riscv_vle16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1_t 
__riscv_vle16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m2_t __riscv_vle16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m4_t __riscv_vle16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m8_t __riscv_vle16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl); +---- + +[[policy-variant-bf16-vector-unit-stride-store]] +==== Vector Unit-Stride Store Intrinsics +Intrinsics here don't have a policy variant. 
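+
+As an illustrative sketch of the policy suffixes above (assuming a
+toolchain whose `<riscv_vector.h>` provides these bfloat16 intrinsics and
+the standard `__riscv_vsetvl_e16m1` helper), `_tu` keeps the tail of the
+destination operand `vd` undisturbed instead of leaving it unspecified:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Reload only the first vl = min(n, VLMAX) elements of acc from buf;
+// with _tu, elements at positions >= vl keep their previous values
+// from acc rather than becoming agnostic (unspecified).
+vbfloat16m1_t refresh_prefix(vbfloat16m1_t acc, const __bf16 *buf,
+                             size_t n) {
+  size_t vl = __riscv_vsetvl_e16m1(n);
+  return __riscv_vle16_v_bf16m1_tu(acc, buf, vl);
+}
+----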
+ +[[policy-variant-vector-strided-load]] +==== Vector Strided Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vlse16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vlse16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1_t __riscv_vlse16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2_t __riscv_vlse16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4_t __riscv_vlse16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m8_t __riscv_vlse16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vlse16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vlse16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vlse16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vlse16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vlse16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vlse16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vlse16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vlse16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vlse16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vlse16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vlse16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vlse16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vlse16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vlse16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vlse16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +---- + +[[policy-variant-vector-strided-store]] +==== Vector Strided Store Intrinsics +Intrinsics here don't have a policy variant. 
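+
+A sketch of the masked policy variants above, under the same toolchain
+assumption: `_tum` is tail undisturbed, mask agnostic, so active elements
+are loaded, inactive elements become unspecified, and the tail keeps the
+values of `vd`:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Strided load of every second bf16 element. The rs2 operand is a
+// *byte* stride, so two elements apart is 2 * sizeof(__bf16) = 4 bytes.
+vbfloat16m1_t load_even_elements_tum(vbool16_t vm, vbfloat16m1_t vd,
+                                     const __bf16 *buf, size_t vl) {
+  return __riscv_vlse16_v_bf16m1_tum(vm, vd, buf, 2 * sizeof(__bf16), vl);
+}
+----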
+ +[[policy-variant-vector-indexed-load]] +==== Vector Indexed Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vloxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16_v_bf16mf2_tu(vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vloxei16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vloxei16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vloxei16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl); +vbfloat16mf4_t __riscv_vluxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16_v_bf16mf2_tu(vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vluxei16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vluxei16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vluxei16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 
*rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +---- + +[[policy-variant-vector-indexed-store]] +==== Vector Indexed Store Intrinsics +Intrinsics here don't have a policy variant. 
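+
+A sketch of the indexed forms (the `vid`/`vrsub`/`vsll` helpers used to
+build the index vector are base V integer intrinsics, assumed available
+here): the `ei16` index operand holds byte offsets, so an element index
+must be scaled by `sizeof(__bf16)` before the gather:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Gather the first vl bf16 elements of buf in reverse order with an
+// unordered indexed load, leaving the tail of vd undisturbed (_tu).
+vbfloat16m1_t gather_reversed_tu(vbfloat16m1_t vd, const __bf16 *buf,
+                                 size_t vl) {
+  vuint16m1_t idx = __riscv_vid_v_u16m1(vl);      // 0, 1, ..., vl-1
+  idx = __riscv_vrsub_vx_u16m1(idx, vl - 1, vl);  // vl-1, ..., 1, 0
+  idx = __riscv_vsll_vx_u16m1(idx, 1, vl);        // scale to byte offsets
+  return __riscv_vluxei16_v_bf16m1_tu(vd, buf, idx, vl);
+}
+----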
+ +[[policy-variant-unit-stride-fault-only-first-loads]] +==== Unit-stride Fault-Only-First Loads Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vle16ff_v_bf16mf4_tu(vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_v_bf16mf2_tu(vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2_t __riscv_vle16ff_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4_t __riscv_vle16ff_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m8_t __riscv_vle16ff_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2_t __riscv_vle16ff_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4_t __riscv_vle16ff_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m8_t __riscv_vle16ff_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2_t __riscv_vle16ff_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4_t __riscv_vle16ff_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m8_t __riscv_vle16ff_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2_t __riscv_vle16ff_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4_t __riscv_vle16ff_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m8_t __riscv_vle16ff_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +---- + +=== BFloat16 Vector Loads and Stores Segment Intrinsics + +[[policy-variant-vector-unit-stride-segment-load]] +==== Vector Unit-Stride Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vlseg2e16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl); 
+vbfloat16mf4x4_t __riscv_vlseg4e16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t 
__riscv_vlseg4e16ff_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const 
__bf16 *rs1, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + 
size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); 
+vbfloat16m1x5_t __riscv_vlseg5e16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); 
+vbfloat16m1x6_t __riscv_vlseg6e16ff_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t 
__riscv_vlseg2e16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_v_bf16m2x2_mu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t 
__riscv_vlseg3e16ff_v_bf16m2x3_mu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_v_bf16m2x4_mu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_v_bf16m4x2_mu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +---- + +[[policy-variant-vector-unit-stride-segment-store]] +==== Vector Unit-Stride Segment Store Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-strided-segment-load]] +==== Vector Strided Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vlsseg2e16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, 
+ ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t 
__riscv_vlsseg2e16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_v_bf16m4x2_tumu(vbool4_t 
vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_v_bf16m2x2_mu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_v_bf16m2x3_mu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_v_bf16m2x4_mu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_v_bf16m4x2_mu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +---- + 
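+A minimal usage sketch (an editorial illustration, not part of the generated
+listing) for the strided segment loads above, assuming the `zvfbfmin`
+extension and a hypothetical buffer `buf` of four-field bf16 records:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Load fields 0 and 1 of each record into a two-member tuple, keeping the
+// tail elements of `vd` (tail-undisturbed policy). The stride argument is
+// the record size in bytes.
+vbfloat16m1x2_t load_xy_tu(vbfloat16m1x2_t vd, const __bf16 *buf, size_t n) {
+  size_t vl = __riscv_vsetvl_e16m1(n);
+  return __riscv_vlsseg2e16_v_bf16m1x2_tu(vd, buf, 4 * sizeof(__bf16), vl);
+}
+----
+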
+[[policy-variant-vector-strided-segment-store]] +==== Vector Strided Segment Store Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-indexed-segment-load]] +==== Vector Indexed Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + 
size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t 
__riscv_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + 
vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t 
__riscv_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, + size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t 
vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + 
const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, 
+ vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +---- + 
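+A minimal usage sketch (editorial, not generated) for the indexed segment
+loads above; `base` and the byte offsets in `byte_idx` are hypothetical.
+With `_tum`, tail elements keep `vd` and masked-off elements are agnostic:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Ordered indexed segment load: for each active element i, gather a
+// two-field bf16 segment from base + byte_idx[i].
+vbfloat16m1x2_t gather_pairs_tum(vbool16_t vm, vbfloat16m1x2_t vd,
+                                 const __bf16 *base, vuint16m1_t byte_idx,
+                                 size_t n) {
+  size_t vl = __riscv_vsetvl_e16m1(n);
+  return __riscv_vloxseg2ei16_v_bf16m1x2_tum(vm, vd, base, byte_idx, vl);
+}
+----
+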
+[[policy-variant-vector-indexed-segment-store]] +==== Vector Indexed Segment Store Intrinsics +Intrinsics here don't have a policy variant. + +=== BFloat16 Convert Intrinsics + +[[policy-variant-bf16-vector-narrow-convert]] +==== Vector Narrowing Convert Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_tum(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_tumu(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_tumu(vbool8_t vm, + vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_tumu(vbool4_t vm, + vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_mu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_mu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, + unsigned int frm, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + unsigned int frm, + 
size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tum(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_rm_tum(vbool8_t vm, + vbfloat16m2_t vd, + vfloat32m4_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_rm_tum(vbool4_t vm, + vbfloat16m4_t vd, + vfloat32m8_t vs2, + unsigned int frm, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tumu(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_rm_tumu(vbool8_t vm, + vbfloat16m2_t vd, + vfloat32m4_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_rm_tumu(vbool4_t vm, + vbfloat16m4_t vd, + vfloat32m8_t vs2, + unsigned int frm, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_mu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_mu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_rm_mu(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_rm_mu(vbool8_t vm, + vbfloat16m2_t vd, + vfloat32m4_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_rm_mu(vbool4_t vm, + vbfloat16m4_t vd, + vfloat32m8_t vs2, + unsigned int frm, + size_t vl); +---- + +[[policy-variant-bf16-vector-widening-convert]] +==== Vector Widening Convert Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwcvtbf16_f_f_v_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_f_v_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_f_v_f32m2_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_f_v_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_f_v_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_f_v_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_f_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_f_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_f_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_f_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_f_v_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_f_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_f_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_f_v_f32m4_tumu(vbool8_t vm, 
vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_f_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_f_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_f_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_f_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_f_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_f_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +---- + +=== BFloat16 Arithmetic Intrinsics + +[[policy-variant-bf16-widening-multiply-accumulate]] +==== Vector Widening Multiply-Accumulate Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t 
__riscv_vfwmaccbf16_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); 
+vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +// masked functions +vfloat32mf2_t +__riscv_vfwmaccbf16_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t +__riscv_vfwmaccbf16_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t 
__riscv_vfwmaccbf16_vf_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +---- + +[[policy-variant-vector-bf16-move]] +==== Vector BFloat16 Move Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmv_v_v_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, + size_t vl); +vbfloat16mf2_t __riscv_vmv_v_v_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, + size_t vl); +vbfloat16m1_t __riscv_vmv_v_v_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, + size_t vl); +vbfloat16m2_t __riscv_vmv_v_v_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, + size_t vl); +vbfloat16m4_t __riscv_vmv_v_v_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, + size_t vl); +vbfloat16m8_t __riscv_vmv_v_v_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, + size_t vl); +---- + +[[policy-variant-vector-bf16-merge]] +==== Vector BFloat16 Merge Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmerge_vvm_bf16mf4_tu(vbfloat16mf4_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, vbool64_t v0, + size_t vl); +vbfloat16mf2_t __riscv_vmerge_vvm_bf16mf2_tu(vbfloat16mf2_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, vbool32_t v0, + size_t vl); +vbfloat16m1_t __riscv_vmerge_vvm_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, vbool16_t v0, + size_t vl); +vbfloat16m2_t __riscv_vmerge_vvm_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, vbool8_t v0, + size_t vl); +vbfloat16m4_t __riscv_vmerge_vvm_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, vbool4_t v0, + size_t vl); +vbfloat16m8_t __riscv_vmerge_vvm_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2, + vbfloat16m8_t vs1, vbool2_t v0, + size_t vl); +---- + +=== BFloat16 Miscellaneous Vector Utility Intrinsics + +[[policy-variant-reinterpret-cast-conversion]] +==== Reinterpret Cast Conversion Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-lmul-extensionn]] +==== Vector LMUL Extension Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-lmul-truncation]] +==== Vector LMUL Truncation Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-initialization]] +==== Vector Initialization Intrinsics +Intrinsics here don't have a policy variant. 
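+
+Tying together the convert and multiply-accumulate intrinsics listed earlier
+in this section, a minimal sketch (editorial, not generated) that accumulates
+a bf16 product into f32 with the `_tu` policy and narrows back to bf16 under
+an explicit rounding mode; `acc`, `a`, `b`, and `dest` are hypothetical
+operands, and the `zvfbfwma`/`zvfbfmin` extensions are assumed:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+vbfloat16m1_t fused_step(vfloat32m2_t acc, vbfloat16m1_t a, vbfloat16m1_t b,
+                         vbfloat16m1_t dest, size_t n) {
+  size_t vl = __riscv_vsetvl_e16m1(n);
+  // Widening multiply-accumulate: acc += a * b, tail of `acc` undisturbed.
+  acc = __riscv_vfwmaccbf16_vv_f32m2_tu(acc, a, b, vl);
+  // Narrow to bf16; __RISCV_FRM_RNE requests round-to-nearest-even.
+  return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tu(dest, acc, __RISCV_FRM_RNE, vl);
+}
+----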
+ +[[policy-variant-vector-insertion]] +==== Vector Insertion Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-extraction]] +==== Vector Extraction Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-creation]] +==== Vector Creation Intrinsics +Intrinsics here don't have a policy variant. diff --git a/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/00_bfloat16_vector_loads_and_stores_intrinsics.adoc b/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/00_bfloat16_vector_loads_and_stores_intrinsics.adoc new file mode 100644 index 000000000..7d99fcc30 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/00_bfloat16_vector_loads_and_stores_intrinsics.adoc @@ -0,0 +1,372 @@ + +=== BFloat16 Vector Loads and Stores Intrinsics + +[[policy-variant-bf16-vector-unit-stride-load]] +==== Vector Unit-Stride Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vle16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2_t __riscv_vle16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1_t __riscv_vle16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m2_t __riscv_vle16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m4_t __riscv_vle16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m8_t __riscv_vle16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl); +---- + +[[policy-variant-bf16-vector-unit-stride-store]] +==== 
Vector Unit-Stride Store Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-strided-load]] +==== Vector Strided Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vlse16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vlse16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1_t __riscv_vlse16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2_t __riscv_vlse16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4_t __riscv_vlse16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m8_t __riscv_vlse16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vlse16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vlse16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vlse16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vlse16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vlse16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vlse16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vlse16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vlse16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vlse16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vlse16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vlse16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vlse16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vlse16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vlse16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vlse16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +---- + +[[policy-variant-vector-strided-store]] +==== Vector Strided Store Intrinsics +Intrinsics here don't have a policy variant. 
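+
+A minimal usage sketch (editorial, not generated) for the strided loads
+above, using the mask-undisturbed (`_mu`) policy so inactive lanes keep
+`vd`; `src`, `vd`, and `vm` are hypothetical:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Load every third bf16 element from `src` into the active lanes only.
+vbfloat16m1_t load_every_third(vbool16_t vm, vbfloat16m1_t vd,
+                               const __bf16 *src, size_t n) {
+  size_t vl = __riscv_vsetvl_e16m1(n);
+  return __riscv_vlse16_v_bf16m1_mu(vm, vd, src, 3 * sizeof(__bf16), vl);
+}
+----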
+ +[[policy-variant-vector-indexed-load]] +==== Vector Indexed Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vloxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16_v_bf16mf2_tu(vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vloxei16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vloxei16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vloxei16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl); +vbfloat16mf4_t __riscv_vluxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16_v_bf16mf2_tu(vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vluxei16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vluxei16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vluxei16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 
*rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +---- + +[[policy-variant-vector-indexed-store]] +==== Vector Indexed Store Intrinsics +Intrinsics here don't have a policy variant. 
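+
+As a brief, hypothetical sketch (the wrapper name is invented for
+illustration): `vloxei16` is an ordered indexed load and `vluxei16` an
+unordered one, with the index vector holding byte offsets from the base
+pointer; the `_tu` suffix keeps tail elements of the result from `vd`.
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Gather bf16 elements located at the given byte offsets from base,
+// preserving tail elements of the destination (tail undisturbed).
+vbfloat16m1_t gather_bf16_tu(vbfloat16m1_t vd, const __bf16 *base,
+                             vuint16m1_t byte_offsets, size_t vl) {
+  return __riscv_vloxei16_v_bf16m1_tu(vd, base, byte_offsets, vl);
+}
+----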
+ +[[policy-variant-unit-stride-fault-only-first-loads]] +==== Unit-stride Fault-Only-First Loads Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vle16ff_v_bf16mf4_tu(vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_v_bf16mf2_tu(vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2_t __riscv_vle16ff_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4_t __riscv_vle16ff_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m8_t __riscv_vle16ff_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2_t __riscv_vle16ff_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4_t __riscv_vle16ff_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m8_t __riscv_vle16ff_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2_t __riscv_vle16ff_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4_t __riscv_vle16ff_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m8_t __riscv_vle16ff_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2_t __riscv_vle16ff_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4_t __riscv_vle16ff_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m8_t __riscv_vle16ff_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +---- diff --git a/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/01_bfloat16_vector_loads_and_stores_segment_intrinsics.adoc b/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/01_bfloat16_vector_loads_and_stores_segment_intrinsics.adoc new file mode 100644 index 000000000..f67848b46 --- /dev/null +++ 
b/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/01_bfloat16_vector_loads_and_stores_segment_intrinsics.adoc @@ -0,0 +1,1991 @@ + +=== BFloat16 Vector Loads and Stores Segment Intrinsics + +[[policy-variant-vector-unit-stride-segment-load]] +==== Vector Unit-Stride Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vlseg2e16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t 
__riscv_vlseg7e16ff_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + 
const __bf16 *rs1, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t 
__riscv_vlseg6e16ff_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const 
__bf16 *rs1, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, + size_t vl); +vbfloat16m1x2_t 
__riscv_vlseg2e16ff_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, 
+ const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_v_bf16m1x6_mu(vbool16_t 
vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_v_bf16m2x2_mu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_v_bf16m2x3_mu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_v_bf16m2x4_mu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_v_bf16m4x2_mu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl); +---- + +[[policy-variant-vector-unit-stride-segment-store]] +==== Vector Unit-Stride Segment Store Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-strided-segment-load]] +==== Vector Strided Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vlsseg2e16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t
__riscv_vlsseg7e16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t 
__riscv_vlsseg8e16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_v_bf16m1x8_tumu(vbool16_t vm, + 
vbfloat16m1x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t 
__riscv_vlsseg2e16_v_bf16m2x2_mu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_v_bf16m2x3_mu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_v_bf16m2x4_mu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_v_bf16m4x2_mu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +---- + +[[policy-variant-vector-strided-segment-store]] +==== Vector Strided Segment Store Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-indexed-segment-load]] +==== Vector Indexed Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const 
__bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + 
const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t 
__riscv_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 
*rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, + 
size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, + size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t 
__riscv_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, + vbfloat16m2x2_t vd, + const 
__bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, + 
vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +---- + +[[policy-variant-vector-indexed-segment-store]] +==== Vector Indexed Segment Store Intrinsics +Intrinsics here don't have a policy variant. diff --git a/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/02_bfloat16_convert_intrinsics.adoc b/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/02_bfloat16_convert_intrinsics.adoc new file mode 100644 index 000000000..c807ad197 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/02_bfloat16_convert_intrinsics.adoc @@ -0,0 +1,220 @@ + +=== BFloat16 Convert Intrinsics + +[[policy-variant-bf16-vector-narrow-convert]] +==== Vector Narrowing Convert Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_tum(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_tumu(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_tumu(vbool8_t vm, + vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_tumu(vbool4_t vm, + vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_mu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_mu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +vbfloat16mf4_t 
__riscv_vfncvtbf16_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, + unsigned int frm, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tum(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_rm_tum(vbool8_t vm, + vbfloat16m2_t vd, + vfloat32m4_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_rm_tum(vbool4_t vm, + vbfloat16m4_t vd, + vfloat32m8_t vs2, + unsigned int frm, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tumu(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_rm_tumu(vbool8_t vm, + vbfloat16m2_t vd, + vfloat32m4_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_rm_tumu(vbool4_t vm, + vbfloat16m4_t vd, + vfloat32m8_t vs2, + unsigned int frm, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_mu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_mu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_f_w_bf16m1_rm_mu(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_f_w_bf16m2_rm_mu(vbool8_t vm, + vbfloat16m2_t vd, + vfloat32m4_t vs2, + unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_f_w_bf16m4_rm_mu(vbool4_t vm, + vbfloat16m4_t vd, + vfloat32m8_t vs2, + unsigned int frm, + size_t vl); +---- + +[[policy-variant-bf16-vector-widening-convert]] +==== Vector Widening Convert Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwcvtbf16_f_f_v_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_f_v_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_f_v_f32m2_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_f_v_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_f_v_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_f_v_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t 
vs2, + size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_f_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_f_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_f_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_f_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_f_v_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_f_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_f_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_f_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_f_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_f_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_f_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_f_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_f_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_f_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +---- diff --git a/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/03_bfloat16_arithmetic_intrinsics.adoc b/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/03_bfloat16_arithmetic_intrinsics.adoc new file mode 100644 index 000000000..0c92dcf2e --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/03_bfloat16_arithmetic_intrinsics.adoc @@ -0,0 +1,319 @@ + +=== BFloat16 Arithmetic Intrinsics + +[[policy-variant-bf16-widening-multiply-accumulate]] +==== Vector Widening Multiply-Accumulate Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_tum(vbool64_t vm, 
vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + 
vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +// masked functions +vfloat32mf2_t +__riscv_vfwmaccbf16_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t +__riscv_vfwmaccbf16_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + unsigned int 
frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_vv_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_vf_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +---- + +[[policy-variant-vector-bf16-move]] +==== Vector BFloat16 Move Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmv_v_v_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, + size_t vl); +vbfloat16mf2_t __riscv_vmv_v_v_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, + size_t vl); +vbfloat16m1_t __riscv_vmv_v_v_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, + size_t vl); +vbfloat16m2_t __riscv_vmv_v_v_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, + size_t vl); +vbfloat16m4_t __riscv_vmv_v_v_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, + size_t vl); +vbfloat16m8_t __riscv_vmv_v_v_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, + size_t vl); +---- + +[[policy-variant-vector-bf16-merge]] +==== Vector BFloat16 Merge Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmerge_vvm_bf16mf4_tu(vbfloat16mf4_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, vbool64_t v0, + size_t vl); +vbfloat16mf2_t __riscv_vmerge_vvm_bf16mf2_tu(vbfloat16mf2_t 
vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, vbool32_t v0, + size_t vl); +vbfloat16m1_t __riscv_vmerge_vvm_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, vbool16_t v0, + size_t vl); +vbfloat16m2_t __riscv_vmerge_vvm_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, vbool8_t v0, + size_t vl); +vbfloat16m4_t __riscv_vmerge_vvm_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, vbool4_t v0, + size_t vl); +vbfloat16m8_t __riscv_vmerge_vvm_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2, + vbfloat16m8_t vs1, vbool2_t v0, + size_t vl); +---- diff --git a/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/04_bfloat16_miscellaneous_vector_utility_intrinsics.adoc b/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/04_bfloat16_miscellaneous_vector_utility_intrinsics.adoc new file mode 100644 index 000000000..363b02828 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/intrinsic_funcs/04_bfloat16_miscellaneous_vector_utility_intrinsics.adoc @@ -0,0 +1,30 @@ + +=== BFloat16 Miscellaneous Vector Utility Intrinsics + +[[policy-variant-reinterpret-cast-conversion]] +==== Reinterpret Cast Conversion Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-lmul-extension]] +==== Vector LMUL Extension Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-lmul-truncation]] +==== Vector LMUL Truncation Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-initialization]] +==== Vector Initialization Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-insertion]] +==== Vector Insertion Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-extraction]] +==== Vector Extraction Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-vector-creation]] +==== Vector Creation Intrinsics +Intrinsics here don't have a policy variant.
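The policy listings above are easier to digest with one concrete use. The sketch below is a minimal illustration, not part of the generated specification: the wrapper names are invented for this example, the strip-mining setup uses the base RVV `vsetvl`/load intrinsics rather than anything bfloat16-specific, and it assumes a toolchain with the Zvfbfmin and Zvfbfwma extensions enabled.

[,c]
----
#include <riscv_vector.h>

// Narrow one strip of float32 data to bfloat16 under the tail-undisturbed
// (_tu) policy: result elements at index >= vl keep the values they had in
// vd instead of becoming agnostic.
vbfloat16m1_t narrow_strip_tu(vbfloat16m1_t vd, const float *src, size_t n) {
  size_t vl = __riscv_vsetvl_e32m2(n);
  vfloat32m2_t v = __riscv_vle32_v_f32m2(src, vl);
  return __riscv_vfncvtbf16_f_f_w_bf16m1_tu(vd, v, vl);
}

// Masked widening multiply-accumulate under the tail-undisturbed,
// mask-undisturbed (_tumu) policy: tail elements and masked-off body
// elements both keep the accumulator's previous values.
vfloat32m1_t fwmacc_strip_tumu(vbool32_t vm, vfloat32m1_t acc,
                               vbfloat16mf2_t a, vbfloat16mf2_t b, size_t vl) {
  return __riscv_vfwmaccbf16_vv_f32m1_tumu(vm, acc, a, b, vl);
}
----

The `_tum` and `_mu` variants follow the same call shape, differing only in whether tail and masked-off elements are agnostic or taken from `vd`.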
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vfncvtbf16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vfncvtbf16.c new file mode 100644 index 000000000..a5a06f358 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vfncvtbf16.c @@ -0,0 +1,249 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_tu(vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_tu(vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_tu(vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_tu(vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_tu(vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_tum(vm, vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_tum(vm, vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_tum(vm, vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_tum(vm, vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_tum(vm, vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_tumu(vm, vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_tumu(vm, vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_tumu(vm, vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_mu(vm, vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t
vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_mu(vm, vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_mu(vm, vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_mu(vm, vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_mu(vm, vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tum(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tumu(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_tumu(vbool8_t vm, + vbfloat16m2_t vd, + vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_tumu(vbool4_t vm, + vbfloat16m4_t vd, + vfloat32m8_t vs2, + 
size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_mu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_mu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_f_w_bf16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vfwcvtbf16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vfwcvtbf16.c new file mode 100644 index 000000000..561d3233a --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vfwcvtbf16.c @@ -0,0 +1,108 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32mf2_tu(vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tu(vfloat32m1_t vd, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m1_tu(vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m2_tu(vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m4_tu(vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m8_tu(vd, vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32mf2_tum(vm, vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m1_tum(vm, vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m2_tum(vm, vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m4_tum(vm, vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m8_tum(vm, vd, vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { +
return __riscv_vfwcvtbf16_f_f_v_f32mf2_tumu(vm, vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m1_tumu(vm, vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m2_tumu(vm, vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m4_tumu(vm, vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m8_tumu(vm, vd, vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32mf2_mu(vm, vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m1_mu(vm, vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m2_mu(vm, vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m4_mu(vm, vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_f_v_f32m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vfwmaccbf16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vfwmaccbf16.c new file mode 100644 index 000000000..5375cf6de --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vfwmaccbf16.c @@ -0,0 +1,502 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_tu(vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_tu(vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_tu(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_tu(vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_tu(vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_tu(vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_tu(vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_tu(vd, vs1, vs2, vl); +} + +vfloat32m4_t
test_vfwmaccbf16_vf_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_tu(vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_tu(vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_tu(vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_tumu(vbool16_t vm, 
vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tu(vfloat32m1_t 
vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + 
+vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m2_rm_mu(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_vf_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vle16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vle16.c new file mode 100644 index 000000000..47104da04 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vle16.c @@ -0,0 +1,128 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4_t test_vle16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16mf4_tu(vd, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16mf2_tu(vd, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16m1_tu(vd, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16m2_tu(vd, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16m4_tu(vd, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_v_bf16m8_tu(vd, rs1, vl); +} + +vbfloat16mf4_t test_vle16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf4_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf2_tum(vm, vd, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m1_tum(vm, vd, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m2_tum(vm, vd, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m4_tum(vm, vd, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m8_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4_t test_vle16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf4_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2_t
test_vle16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf2_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m1_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m2_tumu(vm, vd, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m4_tumu(vm, vd, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m8_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4_t test_vle16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf4_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16mf2_mu(vm, vd, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m1_mu(vm, vd, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m2_mu(vm, vd, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m4_mu(vm, vd, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_v_bf16m8_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vle16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vle16ff.c new file mode 100644 index 000000000..210e224f7 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vle16ff.c @@ -0,0 +1,146 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16mf4_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16mf2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16m1_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16m2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16m4_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_v_bf16m8_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { +
return __riscv_vle16ff_v_bf16mf4_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m1_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m4_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m8_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf4_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf2_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m1_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m2_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m4_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m8_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf4_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16mf2_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m1_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m2_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m4_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_v_bf16m8_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxei16.c new file mode 100644 index 
000000000..8609a6866 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxei16.c @@ -0,0 +1,146 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16mf4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16mf2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16m1_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16m2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16m4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_v_bf16m8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m1_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m1_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, +
size_t vl) { + return __riscv_vloxei16_v_bf16m4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16mf2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m1_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16_v_bf16m8_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg2ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg2ei16.c new file mode 100644 index 000000000..76a1f0dd3 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg2ei16.c @@ -0,0 +1,145 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf4x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf2x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m1x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m2x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m4x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf4x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf2x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, +
vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m1x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m2x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m4x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf4x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf2x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m1x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m2x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m4x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf4x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16mf2x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m1x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m2x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_v_bf16m4x2_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg3ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg3ei16.c new file mode 100644 index 000000000..0e7deb619 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg3ei16.c @@ -0,0 +1,119 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf4x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16
*rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf2x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m1x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m2x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf4x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf2x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m1x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m2x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf4x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf2x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m1x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m2x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf4x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16mf2x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m1x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_v_bf16m2x3_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg4ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg4ei16.c new file mode 100644 index 000000000..0a4ff1b91 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg4ei16.c @@ -0,0 +1,119 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ 
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf4x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf2x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m1x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m2x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf4x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf2x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m1x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m2x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf4x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf2x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m1x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m2x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf4x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16mf2x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m1x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_v_bf16m2x4_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg5ei16.c
b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg5ei16.c new file mode 100644 index 000000000..22051b5bb --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg5ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf4x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf2x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16m1x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf4x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf2x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16m1x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf4x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf2x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16m1x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf4x5_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16mf2x5_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_v_bf16m1x5_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg6ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg6ei16.c new file mode 100644 index 000000000..464db0e1c --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg6ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN:
-emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf4x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf2x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16m1x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf4x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf2x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16m1x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf4x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf2x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16m1x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf4x6_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16mf2x6_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_v_bf16m1x6_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg7ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg7ei16.c new file mode 100644 index 000000000..b2ed153d6 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg7ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf4x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2,
size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf2x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16m1x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf4x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf2x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16m1x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf4x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf2x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16m1x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf4x7_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16mf2x7_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_v_bf16m1x7_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg8ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg8ei16.c new file mode 100644 index 000000000..9834b9540 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vloxseg8ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf4x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf2x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16m1x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) {
return __riscv_vloxseg8ei16_v_bf16mf4x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf2x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16m1x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf4x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf2x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16m1x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf4x8_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16mf2x8_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_v_bf16m1x8_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlse16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlse16.c new file mode 100644 index 000000000..03440c818 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlse16.c @@ -0,0 +1,146 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4_t test_vlse16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16mf4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16mf2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16m1_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16m2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16m4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_v_bf16m8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return
__riscv_vlse16_v_bf16mf4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m1_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m1_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16mf2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m1_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_v_bf16m8_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg2e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg2e16.c new file mode 100644 index 000000000..c06a4e3e9 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg2e16.c @@ -0,0 +1,114 @@ 
+// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf4x2_tu(vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf2x2_tu(vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m1x2_tu(vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m2x2_tu(vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m4x2_tu(vd, rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf4x2_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf2x2_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m1x2_tum(vm, vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m2x2_tum(vm, vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m4x2_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf4x2_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf2x2_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m1x2_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m2x2_tumu(vm, vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m4x2_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf4x2_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16mf2x2_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m1x2_mu(vm, vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_mu(vbool8_t vm,
vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m2x2_mu(vm, vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_v_bf16m4x2_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg2e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg2e16ff.c new file mode 100644 index 000000000..5d6ba8a67 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg2e16ff.c @@ -0,0 +1,138 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf4x2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf2x2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m1x2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m2x2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m4x2_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf4x2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf2x2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m1x2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m2x2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m4x2_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf4x2_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf2x2_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m1x2_tumu(vm, vd, rs1,
new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m2x2_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m4x2_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf4x2_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16mf2x2_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m1x2_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m2x2_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_v_bf16m4x2_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg3e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg3e16.c new file mode 100644 index 000000000..d34cdd6fb --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg3e16.c @@ -0,0 +1,94 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf4x3_tu(vd, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf2x3_tu(vd, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m1x3_tu(vd, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m2x3_tu(vd, rs1, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf4x3_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf2x3_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m1x3_tum(vm, vd, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m2x3_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t
vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf4x3_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf2x3_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m1x3_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m2x3_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf4x3_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16mf2x3_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m1x3_mu(vm, vd, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_v_bf16m2x3_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg3e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg3e16ff.c new file mode 100644 index 000000000..a9ee87858 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg3e16ff.c @@ -0,0 +1,113 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf4x3_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf2x3_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m1x3_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m2x3_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf4x3_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf2x3_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m1x3_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return 
__riscv_vlseg3e16ff_v_bf16m2x3_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf4x3_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf2x3_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m1x3_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m2x3_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf4x3_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16mf2x3_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m1x3_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_v_bf16m2x3_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg4e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg4e16.c new file mode 100644 index 000000000..b98db339b --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg4e16.c @@ -0,0 +1,94 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf4x4_tu(vd, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf2x4_tu(vd, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m1x4_tu(vd, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m2x4_tu(vd, rs1, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf4x4_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf2x4_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m1x4_tum(vm, vd, 
rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m2x4_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf4x4_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf2x4_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m1x4_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m2x4_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf4x4_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16mf2x4_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m1x4_mu(vm, vd, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_v_bf16m2x4_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg4e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg4e16ff.c new file mode 100644 index 000000000..4f5c81117 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg4e16ff.c @@ -0,0 +1,113 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf4x4_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf2x4_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m1x4_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m2x4_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf4x4_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf2x4_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const 
__bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m1x4_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m2x4_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf4x4_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf2x4_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m1x4_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m2x4_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf4x4_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16mf2x4_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m1x4_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_v_bf16m2x4_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg5e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg5e16.c new file mode 100644 index 000000000..10fc525e8 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg5e16.c @@ -0,0 +1,74 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf4x5_tu(vd, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf2x5_tu(vd, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16m1x5_tu(vd, rs1, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf4x5_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf2x5_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_tum(vbool16_t vm, 
vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16m1x5_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf4x5_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf2x5_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16m1x5_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf4x5_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16mf2x5_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_v_bf16m1x5_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg5e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg5e16ff.c new file mode 100644 index 000000000..152bbf79a --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg5e16ff.c @@ -0,0 +1,88 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf4x5_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf2x5_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16m1x5_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf4x5_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf2x5_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16m1x5_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf4x5_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf2x5_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tumu(vbool16_t vm, + 
vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16m1x5_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf4x5_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16mf2x5_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_v_bf16m1x5_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg6e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg6e16.c new file mode 100644 index 000000000..30022989d --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg6e16.c @@ -0,0 +1,74 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf4x6_tu(vd, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf2x6_tu(vd, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16m1x6_tu(vd, rs1, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf4x6_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf2x6_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16m1x6_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf4x6_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf2x6_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16m1x6_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf4x6_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16mf2x6_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_v_bf16m1x6_mu(vm, vd, rs1, vl); +} diff --git 
a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg6e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg6e16ff.c new file mode 100644 index 000000000..8162270f6 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg6e16ff.c @@ -0,0 +1,88 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf4x6_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf2x6_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16m1x6_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf4x6_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf2x6_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16m1x6_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf4x6_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf2x6_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16m1x6_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf4x6_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16mf2x6_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_v_bf16m1x6_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg7e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg7e16.c new file mode 100644 index 000000000..4dc02df09 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg7e16.c @@ -0,0 +1,74 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature 
+zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf4x7_tu(vd, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf2x7_tu(vd, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16m1x7_tu(vd, rs1, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf4x7_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf2x7_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16m1x7_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf4x7_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf2x7_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16m1x7_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf4x7_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16mf2x7_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_v_bf16m1x7_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg7e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg7e16ff.c new file mode 100644 index 000000000..9be0d7480 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg7e16ff.c @@ -0,0 +1,88 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf4x7_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf2x7_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16m1x7_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tum(vbool64_t vm, + 
vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf4x7_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf2x7_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16m1x7_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf4x7_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf2x7_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16m1x7_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf4x7_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16mf2x7_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_v_bf16m1x7_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg8e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg8e16.c new file mode 100644 index 000000000..56f4d3c4b --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg8e16.c @@ -0,0 +1,74 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf4x8_tu(vd, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf2x8_tu(vd, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16m1x8_tu(vd, rs1, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf4x8_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf2x8_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16m1x8_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x8_t 
test_vlseg8e16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf4x8_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf2x8_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16m1x8_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf4x8_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16mf2x8_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_v_bf16m1x8_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg8e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg8e16ff.c new file mode 100644 index 000000000..20ad03e7d --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlseg8e16ff.c @@ -0,0 +1,88 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf4x8_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf2x8_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16m1x8_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf4x8_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf2x8_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16m1x8_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf4x8_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16mf2x8_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_v_bf16m1x8_tumu(vm, vd, rs1, new_vl, 
vl);
+}
+
+vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_mu(vbool64_t vm,
+    vbfloat16mf4x8_t vd,
+    const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_v_bf16mf4x8_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_mu(vbool32_t vm,
+    vbfloat16mf2x8_t vd,
+    const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_v_bf16mf2x8_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd,
+    const __bf16 *rs1,
+    size_t *new_vl, size_t vl) {
+  return __riscv_vlseg8e16ff_v_bf16m1x8_mu(vm, vd, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg2e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg2e16.c
new file mode 100644
index 000000000..8d6192e4d
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg2e16.c
@@ -0,0 +1,135 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd,
+    const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16mf4x2_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd,
+    const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16mf2x2_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_tu(vbfloat16m1x2_t vd,
+    const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16m1x2_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_tu(vbfloat16m2x2_t vd,
+    const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16m2x2_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_tu(vbfloat16m4x2_t vd,
+    const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16m4x2_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_tum(vbool64_t vm,
+    vbfloat16mf4x2_t vd,
+    const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16mf4x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_tum(vbool32_t vm,
+    vbfloat16mf2x2_t vd,
+    const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16mf2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd,
+    const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16m1x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd,
+    const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16m2x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd,
+    const __bf16 *rs1, ptrdiff_t rs2,
+    size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16m4x2_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_tumu(vbool64_t vm,
+    vbfloat16mf4x2_t vd,
+    const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl) {
+  return __riscv_vlsseg2e16_v_bf16mf4x2_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_tumu(vbool32_t vm,
+    vbfloat16mf2x2_t vd,
+    const __bf16 *rs1,
+    ptrdiff_t rs2, size_t vl)
{ + return __riscv_vlsseg2e16_v_bf16mf2x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16m1x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16m2x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16m4x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf4x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_v_bf16mf2x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m1x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m2x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_v_bf16m4x2_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg3e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg3e16.c new file mode 100644 index 000000000..68b3102ff --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg3e16.c @@ -0,0 +1,111 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf4x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf2x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m1x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m2x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf4x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf2x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_tum(vbool16_t vm, 
vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m1x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m2x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf4x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf2x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16m1x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16m2x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf4x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_v_bf16mf2x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m1x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_v_bf16m2x3_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg4e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg4e16.c new file mode 100644 index 000000000..b9597ec05 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg4e16.c @@ -0,0 +1,111 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf4x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf2x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m1x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m2x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf4x4_tum(vm, vd, rs1, rs2, vl); +} 
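+// In the strided segment loads exercised here, rs1 is the base address and
+// rs2 the signed byte stride between consecutive segments; the four bf16
+// fields of each segment are deinterleaved into the vbfloat16*x4_t tuple.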
+ +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf2x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m1x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m2x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf4x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf2x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16m1x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16m2x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf4x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_v_bf16mf2x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m1x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_v_bf16m2x4_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg5e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg5e16.c new file mode 100644 index 000000000..35de70c99 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg5e16.c @@ -0,0 +1,87 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf4x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf2x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_v_bf16m1x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + 
ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf4x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf2x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_v_bf16m1x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf4x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf2x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16m1x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf4x5_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_v_bf16mf2x5_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_v_bf16m1x5_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg6e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg6e16.c new file mode 100644 index 000000000..4250d7c05 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg6e16.c @@ -0,0 +1,87 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf4x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf2x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_v_bf16m1x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf4x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf2x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_v_bf16m1x6_tum(vm, vd, rs1, rs2, vl); +} + 
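+// Six-field tuples are only generated up to LMUL=1 because segment types
+// must satisfy EMUL * NFIELDS <= 8, so a bf16m2x6 type (2 * 6 = 12 vector
+// registers) cannot exist.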
+vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf4x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf2x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16m1x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf4x6_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_v_bf16mf2x6_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_v_bf16m1x6_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg7e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg7e16.c new file mode 100644 index 000000000..c355f95e3 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg7e16.c @@ -0,0 +1,87 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf4x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf2x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_v_bf16m1x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf4x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf2x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_v_bf16m1x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf4x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf2x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const 
__bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16m1x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf4x7_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_v_bf16mf2x7_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_v_bf16m1x7_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg8e16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg8e16.c new file mode 100644 index 000000000..7c450c4f5 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vlsseg8e16.c @@ -0,0 +1,87 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf4x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf2x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_v_bf16m1x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf4x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf2x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_v_bf16m1x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf4x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf2x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16m1x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf4x8_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_v_bf16mf2x8_mu(vm, vd, rs1, 
rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_v_bf16m1x8_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxei16.c new file mode 100644 index 000000000..c75a51e4f --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxei16.c @@ -0,0 +1,146 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16mf4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16mf2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m1_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_v_bf16m8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m1_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) 
{ + return __riscv_vluxei16_v_bf16m1_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16mf2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m1_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_v_bf16m8_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg2ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg2ei16.c new file mode 100644 index 000000000..dc7426b69 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg2ei16.c @@ -0,0 +1,145 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf4x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf2x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m1x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m2x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m4x2_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return 
__riscv_vluxseg2ei16_v_bf16mf4x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf2x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m1x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m2x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m4x2_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf4x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf2x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m1x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m2x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m4x2_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf4x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16mf2x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m1x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m2x2_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_v_bf16m4x2_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg3ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg3ei16.c new file mode 100644 index 000000000..339f4bcd8 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg3ei16.c @@ -0,0 +1,119 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: 
-emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf4x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf2x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m1x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m2x3_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf4x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf2x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m1x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m2x3_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf4x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf2x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m1x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m2x3_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf4x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16mf2x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m1x3_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_v_bf16m2x3_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg4ei16.c 
b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg4ei16.c new file mode 100644 index 000000000..42fcd4725 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg4ei16.c @@ -0,0 +1,119 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf4x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf2x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m1x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m2x4_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf4x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf2x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m1x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m2x4_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf4x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf2x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m1x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m2x4_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf4x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16mf2x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const 
__bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m1x4_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_v_bf16m2x4_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg5ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg5ei16.c new file mode 100644 index 000000000..e82ca2299 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg5ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf4x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf2x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16m1x5_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf4x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf2x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16m1x5_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf4x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf2x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16m1x5_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf4x5_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16mf2x5_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_v_bf16m1x5_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg6ei16.c 
b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg6ei16.c new file mode 100644 index 000000000..09c61672a --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg6ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf4x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf2x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16m1x6_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf4x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf2x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16m1x6_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf4x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf2x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16m1x6_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf4x6_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16mf2x6_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_v_bf16m1x6_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg7ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg7ei16.c new file mode 100644 index 000000000..4c0ad1e09 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg7ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: 
-emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf4x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf2x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16m1x7_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf4x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf2x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16m1x7_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf4x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf2x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16m1x7_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf4x7_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16mf2x7_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_v_bf16m1x7_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg8ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg8ei16.c new file mode 100644 index 000000000..cbe595cae --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vluxseg8ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf4x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, 
size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf2x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16m1x8_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf4x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf2x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16m1x8_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf4x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf2x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16m1x8_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf4x8_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16mf2x8_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_v_bf16m1x8_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vmerge.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vmerge.c new file mode 100644 index 000000000..d8381141a --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vmerge.c @@ -0,0 +1,44 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4_t test_vmerge_vvm_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, vbool64_t v0, + size_t vl) { + return __riscv_vmerge_vvm_bf16mf4_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16mf2_t test_vmerge_vvm_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, vbool32_t v0, + size_t vl) { + return __riscv_vmerge_vvm_bf16mf2_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16m1_t test_vmerge_vvm_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, vbool16_t v0, + size_t vl) { + return __riscv_vmerge_vvm_bf16m1_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16m2_t test_vmerge_vvm_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, vbool8_t v0, + size_t vl) { + return 
__riscv_vmerge_vvm_bf16m2_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16m4_t test_vmerge_vvm_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, vbool4_t v0, + size_t vl) { + return __riscv_vmerge_vvm_bf16m4_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16m8_t test_vmerge_vvm_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2, + vbfloat16m8_t vs1, vbool2_t v0, + size_t vl) { + return __riscv_vmerge_vvm_bf16m8_tu(vd, vs2, vs1, v0, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vmv.c b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vmv.c new file mode 100644 index 000000000..b4d091c72 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-api-tests/vmv.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4_t test_vmv_v_v_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vmv_v_v_bf16mf4_tu(vd, vs1, vl); +} + +vbfloat16mf2_t test_vmv_v_v_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vmv_v_v_bf16mf2_tu(vd, vs1, vl); +} + +vbfloat16m1_t test_vmv_v_v_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vmv_v_v_bf16m1_tu(vd, vs1, vl); +} + +vbfloat16m2_t test_vmv_v_v_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vmv_v_v_bf16m2_tu(vd, vs1, vl); +} + +vbfloat16m4_t test_vmv_v_v_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vmv_v_v_bf16m4_tu(vd, vs1, vl); +} + +vbfloat16m8_t test_vmv_v_v_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, + size_t vl) { + return __riscv_vmv_v_v_bf16m8_tu(vd, vs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vfncvtbf16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vfncvtbf16.c new file mode 100644 index 000000000..6d0610085 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vfncvtbf16.c @@ -0,0 +1,234 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t 
vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + 
+vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tum(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tumu(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_tumu(vbool8_t vm, + vbfloat16m2_t vd, + vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_tumu(vbool4_t vm, + vbfloat16m4_t vd, + vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_mu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_mu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vfwcvtbf16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vfwcvtbf16.c new file mode 100644 index 000000000..8cd98cc9b --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vfwcvtbf16.c @@ -0,0 +1,108 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tu(vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tu(vfloat32m1_t vd, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_tu(vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + size_t vl) { + return 
__riscv_vfwcvtbf16_f_tu(vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_tu(vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_tu(vd, vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tum(vm, vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tum(vm, vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tum(vm, vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tum(vm, vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tum(vm, vd, vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_mu(vm, vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_mu(vm, vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_mu(vm, vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_mu(vm, vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vfwmaccbf16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vfwmaccbf16.c new file mode 100644 index 000000000..962e7237c --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vfwmaccbf16.c @@ -0,0 +1,472 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_tu(vfloat32mf2_t 
vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_tu(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_tumu(vbool64_t vm, 
vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t 
test_vfwmaccbf16_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t 
test_vfwmaccbf16_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t 
test_vfwmaccbf16_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+                                             vbfloat16m1_t vs1,
+                                             vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+                                             __bf16 vs1, vbfloat16m1_t vs2,
+                                             size_t vl) {
+  return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+                                             vbfloat16m2_t vs1,
+                                             vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+                                             __bf16 vs1, vbfloat16m2_t vs2,
+                                             size_t vl) {
+  return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+                                             vbfloat16m4_t vs1,
+                                             vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+                                             __bf16 vs1, vbfloat16m4_t vs2,
+                                             size_t vl) {
+  return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vle16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vle16.c
new file mode 100644
index 000000000..02fccbe25
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vle16.c
@@ -0,0 +1,128 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vle16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1,
+                                       size_t vl) {
+  return __riscv_vle16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf2_t test_vle16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1,
+                                       size_t vl) {
+  return __riscv_vle16_tu(vd, rs1, vl);
+}
+
+vbfloat16m1_t test_vle16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1,
+                                     size_t vl) {
+  return __riscv_vle16_tu(vd, rs1, vl);
+}
+
+vbfloat16m2_t test_vle16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1,
+                                     size_t vl) {
+  return __riscv_vle16_tu(vd, rs1, vl);
+}
+
+vbfloat16m4_t test_vle16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1,
+                                     size_t vl) {
+  return __riscv_vle16_tu(vd, rs1, vl);
+}
+
+vbfloat16m8_t test_vle16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1,
+                                     size_t vl) {
+  return __riscv_vle16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf4_t test_vle16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd,
+                                        const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2_t test_vle16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd,
+                                        const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16m1_t test_vle16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd,
+                                      const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16m2_t test_vle16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd,
+                                      const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16m4_t test_vle16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd,
+                                      const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16m8_t test_vle16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd,
+                                      const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4_t test_vle16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd,
+                                         const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2_t test_vle16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd,
+                                         const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1_t test_vle16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd,
+                                       const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16m2_t test_vle16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd,
+                                       const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16m4_t test_vle16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd,
+                                       const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16m8_t test_vle16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd,
+                                       const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4_t test_vle16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+                                       const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2_t test_vle16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+                                       const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1_t test_vle16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+                                     const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16m2_t test_vle16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+                                     const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16m4_t test_vle16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+                                     const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16m8_t test_vle16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd,
+                                     const __bf16 *rs1, size_t vl) {
+  return __riscv_vle16_mu(vm, vd, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vle16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vle16ff.c
new file mode 100644
index 000000000..73fc4c812
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vle16ff.c
@@ -0,0 +1,146 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +zvfbfmin \
+// RUN:   -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vle16ff_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1,
+                                         size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2_t test_vle16ff_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1,
+                                         size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1_t test_vle16ff_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1,
+                                       size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16m2_t test_vle16ff_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1,
+                                       size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16m4_t test_vle16ff_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1,
+                                       size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16m8_t test_vle16ff_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1,
+                                       size_t *new_vl, size_t vl) {
+  return __riscv_vle16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4_t test_vle16ff_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t
vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxei16.c new file mode 100644 index 000000000..93488af5a --- /dev/null +++ 
b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxei16.c @@ -0,0 +1,146 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + 
size_t vl) { + return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg2ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg2ei16.c new file mode 100644 index 000000000..f685dec38 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg2ei16.c @@ -0,0 +1,145 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t 
test_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg3ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg3ei16.c new file mode 100644 index 000000000..ba9e4c78e --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg3ei16.c @@ -0,0 +1,119 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); 
+} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg4ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg4ei16.c new file mode 100644 index 000000000..612d1efc9 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg4ei16.c @@ -0,0 +1,119 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t 
vl) { + return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg5ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg5ei16.c new file mode 100644 index 000000000..c35e9c440 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg5ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t 
test_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg6ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg6ei16.c new file mode 100644 index 000000000..24fa36ca6 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg6ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, 
vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg7ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg7ei16.c new file mode 100644 index 000000000..8bc7c4719 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg7ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + 
vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg8ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg8ei16.c new file mode 100644 index 000000000..f9a208c88 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vloxseg8ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t 
test_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlse16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlse16.c new file mode 100644 index 000000000..105964343 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlse16.c @@ -0,0 +1,146 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4_t test_vlse16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return 
__riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg2e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg2e16.c new file mode 100644 index 000000000..1eae1e523 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg2e16.c @@ -0,0 +1,114 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tu(vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tu(vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tu(vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tu(vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tu(vd, rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tum(vm, vd, rs1, vl); +} + 
+vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tum(vm, vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tum(vm, vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg2e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg2e16ff.c new file mode 100644 index 000000000..13eacb312 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg2e16ff.c @@ -0,0 +1,138 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl); 
+} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg3e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg3e16.c new file mode 100644 index 000000000..aa822781f --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg3e16.c @@ -0,0 +1,94 @@ +// REQUIRES: riscv-registered-target +// 
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_tu(vd, rs1, vl);
+}
+
+vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_tu(vbfloat16m1x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_tu(vd, rs1, vl);
+}
+
+vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_tu(vbfloat16m2x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_tum(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_tum(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_tumu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_tumu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_mu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_mu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg3e16_mu(vm, vd, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg3e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg3e16ff.c
new file mode 100644
index 000000000..8734d698a
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg3e16ff.c
@@ -0,0 +1,113 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
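+// Note on naming (per the RVV intrinsics spec): the policy suffixes select
+// how tail and inactive elements are handled. _tu = unmasked, tail
+// undisturbed; _tum = masked, tail undisturbed; _tumu = masked, tail and
+// inactive elements undisturbed; _mu = masked, inactive elements
+// undisturbed. The vd argument supplies the values that are kept. The
+// fault-only-first ("ff") loads additionally write the number of elements
+// actually loaded to *new_vl.
+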
+vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tu(vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tu(vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tum(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tum(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tum(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tumu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tumu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tumu(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tumu(vbool8_t vm,
+ vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_mu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_mu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg4e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg4e16.c
new file mode 100644
index 000000000..47d8eb1af
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg4e16.c
@@ -0,0 +1,94 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_tu(vd, rs1, vl);
+}
+
+vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_tu(vbfloat16m1x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_tu(vd, rs1, vl);
+}
+
+vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_tu(vbfloat16m2x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_tum(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_tum(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_tumu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_tumu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_mu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_mu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg4e16_mu(vm, vd, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg4e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg4e16ff.c
new file mode 100644
index 000000000..0591f7fb9
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg4e16ff.c
@@ -0,0 +1,113 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tu(vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tu(vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tum(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tum(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tum(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tumu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tumu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tumu(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tumu(vbool8_t vm,
+ vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_mu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_mu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg5e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg5e16.c
new file mode 100644
index 000000000..acccc223a
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg5e16.c
@@ -0,0 +1,74 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg5e16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg5e16_tu(vd, rs1, vl);
+}
+
+vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_tu(vbfloat16m1x5_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg5e16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_tum(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg5e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_tum(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg5e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg5e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_tumu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_tumu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_mu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg5e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_mu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg5e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg5e16_mu(vm, vd, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg5e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg5e16ff.c
new file mode 100644
index 000000000..717f8ca98
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg5e16ff.c
@@ -0,0 +1,88 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tu(vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tum(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tum(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tum(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tumu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tumu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tumu(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_mu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_mu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg6e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg6e16.c
new file mode 100644
index 000000000..ee452fbb9
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg6e16.c
@@ -0,0 +1,74 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg6e16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg6e16_tu(vd, rs1, vl);
+}
+
+vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_tu(vbfloat16m1x6_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg6e16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_tum(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg6e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_tum(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg6e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg6e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_tumu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_tumu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_mu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg6e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_mu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg6e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg6e16_mu(vm, vd, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg6e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg6e16ff.c
new file mode 100644
index 000000000..20426758b
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg6e16ff.c
@@ -0,0 +1,88 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tu(vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tum(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tum(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tum(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tumu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tumu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tumu(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_mu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_mu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg7e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg7e16.c
new file mode 100644
index 000000000..4a9a0a998
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg7e16.c
@@ -0,0 +1,74 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg7e16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg7e16_tu(vd, rs1, vl);
+}
+
+vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_tu(vbfloat16m1x7_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg7e16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_tum(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg7e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_tum(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg7e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg7e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_tumu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_tumu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_mu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg7e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_mu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg7e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg7e16_mu(vm, vd, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg7e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg7e16ff.c
new file mode 100644
index 000000000..9d38a68f9
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg7e16ff.c
@@ -0,0 +1,88 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tu(vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tum(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tum(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tum(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tumu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tumu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tumu(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_mu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_mu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg8e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg8e16.c
new file mode 100644
index 000000000..a0f4dfcda
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg8e16.c
@@ -0,0 +1,74 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg8e16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg8e16_tu(vd, rs1, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_tu(vbfloat16m1x8_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg8e16_tu(vd, rs1, vl);
+}
+
+vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_tum(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg8e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_tum(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg8e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg8e16_tum(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_tumu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_tumu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_mu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg8e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_mu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg8e16_mu(vm, vd, rs1, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd,
+ const __bf16 *rs1, size_t vl) {
+ return __riscv_vlseg8e16_mu(vm, vd, rs1, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg8e16ff.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg8e16ff.c
new file mode 100644
index 000000000..b6672e992
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlseg8e16ff.c
@@ -0,0 +1,88 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tu(vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tum(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tum(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tum(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tumu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tumu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tumu(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_mu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_mu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
+
+vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ size_t *new_vl, size_t vl) {
+ return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg2e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg2e16.c
new file mode 100644
index 000000000..202d8bb91
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg2e16.c
@@ -0,0 +1,135 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_tu(vbfloat16m1x2_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_tu(vbfloat16m2x2_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_tu(vbfloat16m4x2_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_tum(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_tum(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_tumu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_tumu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_tumu(vbool16_t vm,
+ vbfloat16m1x2_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_mu(vbool64_t vm,
+ vbfloat16mf4x2_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_mu(vbool32_t vm,
+ vbfloat16mf2x2_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg3e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg3e16.c
new file mode 100644
index 000000000..afd8f84a4
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg3e16.c
@@ -0,0 +1,111 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_tu(vbfloat16m1x3_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_tu(vbfloat16m2x3_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_tum(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_tum(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_tumu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_tumu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_tumu(vbool16_t vm,
+ vbfloat16m1x3_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_mu(vbool64_t vm,
+ vbfloat16mf4x3_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_mu(vbool32_t vm,
+ vbfloat16mf2x3_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg4e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg4e16.c
new file mode 100644
index 000000000..7a4f05726
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg4e16.c
@@ -0,0 +1,111 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_tu(vbfloat16m1x4_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_tu(vbfloat16m2x4_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_tum(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_tum(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_tumu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_tumu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_tumu(vbool16_t vm,
+ vbfloat16m1x4_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_mu(vbool64_t vm,
+ vbfloat16mf4x4_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_mu(vbool32_t vm,
+ vbfloat16mf2x4_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg5e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg5e16.c
new file mode 100644
index 000000000..37a1b3943
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg5e16.c
@@ -0,0 +1,87 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_tu(vbfloat16m1x5_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_tum(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_tum(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_tumu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_tumu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_tumu(vbool16_t vm,
+ vbfloat16m1x5_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_mu(vbool64_t vm,
+ vbfloat16mf4x5_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_mu(vbool32_t vm,
+ vbfloat16mf2x5_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg6e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg6e16.c
new file mode 100644
index 000000000..0a9e457ac
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg6e16.c
@@ -0,0 +1,87 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_tu(vbfloat16m1x6_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_tum(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_tum(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_tumu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_tumu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_tumu(vbool16_t vm,
+ vbfloat16m1x6_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_mu(vbool64_t vm,
+ vbfloat16mf4x6_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_mu(vbool32_t vm,
+ vbfloat16mf2x6_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg7e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg7e16.c
new file mode 100644
index 000000000..1b5d521aa
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg7e16.c
@@ -0,0 +1,87 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_tu(vbfloat16m1x7_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_tum(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_tum(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_tumu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_tumu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_tumu(vbool16_t vm,
+ vbfloat16m1x7_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_mu(vbool64_t vm,
+ vbfloat16mf4x7_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_mu(vbool32_t vm,
+ vbfloat16mf2x7_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg8e16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg8e16.c
new file mode 100644
index 000000000..976543d10
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vlsseg8e16.c
@@ -0,0 +1,87 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_tu(vbfloat16m1x8_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_tum(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_tum(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_tumu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_tumu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_tumu(vbool16_t vm,
+ vbfloat16m1x8_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_mu(vbool64_t vm,
+ vbfloat16mf4x8_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_mu(vbool32_t vm,
+ vbfloat16mf2x8_t vd,
+ const __bf16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
+ return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd,
+ const __bf16 *rs1, ptrdiff_t rs2,
+ size_t vl) {
+ return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxei16.c
new file mode 100644
index 000000000..8170d1bc3
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxei16.c
@@ -0,0 +1,146 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +zvfbfmin \
+// RUN: -target-feature +zvfbfwma -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vbfloat16mf4_t test_vluxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1,
test_vluxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return 
__riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg2ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg2ei16.c new file mode 100644 index 000000000..c76a3fcac --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg2ei16.c @@ -0,0 +1,145 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t
test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg3ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg3ei16.c new file mode 100644 index 000000000..f15d79531 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg3ei16.c @@ -0,0 +1,119 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2,
vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg4ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg4ei16.c new file mode 100644 index 000000000..0ace96fd0 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg4ei16.c @@ -0,0 +1,119 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return
__riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg5ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg5ei16.c new file mode 100644 index 000000000..da9c77b0e --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg5ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const
__bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg6ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg6ei16.c new file mode 100644 index 000000000..6cf4e665c --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg6ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t
test_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg7ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg7ei16.c new file mode 100644 index 000000000..c4233c947 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg7ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) {
+ return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg8ei16.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg8ei16.c new file mode 100644 index 000000000..d38a2b17c --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vluxseg8ei16.c @@ -0,0 +1,93 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, +
vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vmerge.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vmerge.c new file mode 100644 index 000000000..d4b73a2fa --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vmerge.c @@ -0,0 +1,44 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4_t test_vmerge_vvm_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, vbool64_t v0, + size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16mf2_t test_vmerge_vvm_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, vbool32_t v0, + size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16m1_t test_vmerge_vvm_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, vbool16_t v0, + size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16m2_t test_vmerge_vvm_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, vbool8_t v0, + size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16m4_t test_vmerge_vvm_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, vbool4_t v0, + size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); +} + +vbfloat16m8_t test_vmerge_vvm_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2, + vbfloat16m8_t vs1, vbool2_t v0, + size_t vl) { + return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vmv.c b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vmv.c new file mode 100644 index 000000000..36f83611f --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/llvm-overloaded-tests/vmv.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vbfloat16mf4_t test_vmv_v_v_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vmv_v_tu(vd, vs1, vl); +} + +vbfloat16mf2_t test_vmv_v_v_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vmv_v_tu(vd, vs1, vl); +} + +vbfloat16m1_t test_vmv_v_v_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vmv_v_tu(vd, vs1, vl); +} + +vbfloat16m2_t test_vmv_v_v_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vmv_v_tu(vd, vs1, vl); +} + +vbfloat16m4_t test_vmv_v_v_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vmv_v_tu(vd, vs1, vl); +} + +vbfloat16m8_t test_vmv_v_v_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, + size_t vl) { + return __riscv_vmv_v_tu(vd, vs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vfncvtbf16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vfncvtbf16.c new file mode 100644 index 000000000..9e3542923 --- /dev/null +++
b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vfncvtbf16.c @@ -0,0 +1,228 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) {
+ return __riscv_vfncvtbf16_f_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tum(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_tumu(vbool16_t vm, + vbfloat16m1_t vd, + vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_tumu(vbool8_t vm, + vbfloat16m2_t vd, + vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_tumu(vbool4_t vm, + vbfloat16m4_t vd, + vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf4_t test_vfncvtbf16_f_f_w_bf16mf4_rm_mu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16mf2_t test_vfncvtbf16_f_f_w_bf16mf2_rm_mu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m1_t test_vfncvtbf16_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m2_t test_vfncvtbf16_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +vbfloat16m4_t test_vfncvtbf16_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd, + 
vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvtbf16_f_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vfwcvtbf16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vfwcvtbf16.c new file mode 100644 index 000000000..dbf0a4d7d --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vfwcvtbf16.c @@ -0,0 +1,102 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tu(vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tu(vfloat32m1_t vd, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_tu(vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_tu(vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_tu(vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwcvtbf16_f_tu(vd, vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tum(vm, vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tum(vm, vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tum(vm, vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tum(vm, vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tum(vm, vd, vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_tumu(vm, vd, vs2, vl); +} + +vfloat32mf2_t test_vfwcvtbf16_f_f_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_mu(vm, vd, vs2, vl); +} + +vfloat32m1_t test_vfwcvtbf16_f_f_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_mu(vm, vd, vs2, vl); +} + +vfloat32m2_t test_vfwcvtbf16_f_f_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_mu(vm, vd, vs2, vl); +} + +vfloat32m4_t test_vfwcvtbf16_f_f_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_mu(vm, vd, vs2, vl); +} +
+vfloat32m8_t test_vfwcvtbf16_f_f_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvtbf16_f_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vfwmaccbf16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vfwmaccbf16.c new file mode 100644 index 000000000..c20b7c37d --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vfwmaccbf16.c @@ -0,0 +1,466 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_tu(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t
test_vfwmaccbf16_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, 
vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + 
vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32mf2_t test_vfwmaccbf16_vf_f32mf2_rm_mu(vbool64_t vm, 
vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m1_t test_vfwmaccbf16_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m2_t test_vfwmaccbf16_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m4_t test_vfwmaccbf16_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +vfloat32m8_t test_vfwmaccbf16_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmaccbf16_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vle16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vle16.c new file mode 100644 index 000000000..c62108cd5 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vle16.c @@ -0,0 +1,122 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vbfloat16mf4_t test_vle16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + size_t vl) { + return __riscv_vle16_tu(vd, rs1, vl); +} + +vbfloat16mf4_t test_vle16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); +} +
+vbfloat16m4_t test_vle16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4_t test_vle16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4_t test_vle16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2_t test_vle16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); +} + +vbfloat16m1_t test_vle16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); +} + +vbfloat16m2_t test_vle16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); +} + +vbfloat16m4_t test_vle16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); +} + +vbfloat16m8_t test_vle16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vle16_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vle16ff.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vle16ff.c new file mode 100644 index 000000000..8311d7fa1 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vle16ff.c @@ -0,0 +1,140 @@ +#include +#include + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vle16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_tum(vbool64_t vm, 
vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4_t test_vle16ff_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2_t test_vle16ff_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1_t test_vle16ff_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2_t test_vle16ff_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4_t test_vle16ff_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m8_t test_vle16ff_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl) { + return __riscv_vle16ff_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxei16.c new file mode 100644 index 000000000..053e6dd94 --- /dev/null +++ 
b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxei16.c @@ -0,0 +1,140 @@ +#include +#include + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vloxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vloxei16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vloxei16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t 
test_vloxei16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vloxei16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vloxei16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vloxei16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vloxei16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vloxei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg2ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg2ei16.c new file mode 100644 index 000000000..cebce8595 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg2ei16.c @@ -0,0 +1,139 @@ +#include +#include + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return 
__riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vloxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vloxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vloxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vloxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vloxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vloxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg3ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg3ei16.c new file mode 100644 index 000000000..7dc1de409 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg3ei16.c @@ -0,0 +1,113 @@ +#include +#include + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + 
+vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vloxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vloxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vloxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vloxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg4ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg4ei16.c new file mode 100644 index 000000000..a8db59018 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg4ei16.c @@ -0,0 +1,113 @@ +#include +#include + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tum(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t 
test_vloxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vloxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vloxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vloxseg4ei16_v_bf16m1x4_mu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vloxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vloxseg4ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg5ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg5ei16.c new file mode 100644 index 000000000..28cb437cb --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg5ei16.c @@ -0,0 +1,87 @@ +#include +#include + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t 
test_vloxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vloxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vloxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vloxseg5ei16_v_bf16m1x5_mu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg5ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg6ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg6ei16.c new file mode 100644 index 000000000..9745d16e8 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg6ei16.c @@ -0,0 +1,87 @@ +#include +#include + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vloxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vloxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vloxseg6ei16_v_bf16m1x6_mu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg6ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git 
a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg7ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg7ei16.c new file mode 100644 index 000000000..6b64fef2c --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg7ei16.c @@ -0,0 +1,87 @@ +#include +#include + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vloxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vloxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vloxseg7ei16_v_bf16m1x7_mu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg7ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg8ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg8ei16.c new file mode 100644 index 000000000..e5b6607d2 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vloxseg8ei16.c @@ -0,0 +1,87 @@ +#include +#include + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { 
+ return __riscv_vloxseg8ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vloxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vloxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vloxseg8ei16_v_bf16m1x8_mu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vloxseg8ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlse16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlse16.c new file mode 100644 index 000000000..f31b6dae1 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlse16.c @@ -0,0 +1,140 @@ +#include +#include + +vbfloat16mf4_t test_vlse16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlse16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t 
test_vlse16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vlse16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vlse16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vlse16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vlse16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vlse16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vlse16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlse16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg2e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg2e16.c new file mode 100644 index 000000000..adf0bcfd7 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg2e16.c @@ -0,0 +1,108 @@ +#include +#include + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tu(vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tu(vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_tu(vbfloat16m1x2_t 
vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tu(vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tu(vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tu(vd, rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tum(vm, vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tum(vm, vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x2_t test_vlseg2e16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m2x2_t test_vlseg2e16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m4x2_t test_vlseg2e16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg2e16_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg2e16ff.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg2e16ff.c new file mode 100644 index 000000000..94daad69a --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg2e16ff.c @@ -0,0 +1,132 @@ +#include +#include + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 
*rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x2_t test_vlseg2e16ff_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x2_t test_vlseg2e16ff_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x2_t test_vlseg2e16ff_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x2_t test_vlseg2e16ff_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m4x2_t test_vlseg2e16ff_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 
*rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg2e16ff_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg3e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg3e16.c new file mode 100644 index 000000000..cf0d583ff --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg3e16.c @@ -0,0 +1,88 @@ +#include +#include + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_tu(vd, rs1, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x3_t test_vlseg3e16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m2x3_t test_vlseg3e16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg3e16_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg3e16ff.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg3e16ff.c new file mode 100644 index 000000000..24a610a5d --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg3e16ff.c @@ -0,0 +1,107 @@ +#include +#include + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t 
vl) { + return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x3_t test_vlseg3e16ff_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x3_t test_vlseg3e16ff_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x3_t test_vlseg3e16ff_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x3_t test_vlseg3e16ff_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg3e16ff_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg4e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg4e16.c new file mode 100644 index 000000000..a0311857a --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg4e16.c @@ -0,0 +1,88 @@ +#include +#include + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); +} + +vbfloat16mf2x4_t 
test_vlseg4e16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_tu(vd, rs1, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x4_t test_vlseg4e16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m2x4_t test_vlseg4e16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg4e16_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg4e16ff.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg4e16ff.c new file mode 100644 index 000000000..cc7cd3e8f --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg4e16ff.c @@ -0,0 +1,107 @@ +#include +#include + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tu(vd, rs1, new_vl, vl); +} + 
+vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tum(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_tumu(vbool8_t vm, + vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x4_t test_vlseg4e16ff_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x4_t test_vlseg4e16ff_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x4_t test_vlseg4e16ff_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m2x4_t test_vlseg4e16ff_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg4e16ff_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg5e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg5e16.c new file mode 100644 index 000000000..07e8b5d4e --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg5e16.c @@ -0,0 +1,68 @@ +#include +#include + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_tu(vd, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_tu(vd, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_tu(vd, rs1, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_tum(vm, vd, rs1, vl); 
+} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x5_t test_vlseg5e16_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg5e16_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg5e16ff.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg5e16ff.c new file mode 100644 index 000000000..e13f2ef80 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg5e16ff.c @@ -0,0 +1,82 @@ +#include +#include + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tum(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t test_vlseg5e16ff_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x5_t test_vlseg5e16ff_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x5_t 
test_vlseg5e16ff_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x5_t test_vlseg5e16ff_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg5e16ff_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg6e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg6e16.c new file mode 100644 index 000000000..58af0751a --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg6e16.c @@ -0,0 +1,68 @@ +#include +#include + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_tu(vd, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_tu(vd, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_tu(vd, rs1, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x6_t test_vlseg6e16_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg6e16_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg6e16ff.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg6e16ff.c new file mode 100644 index 000000000..b27f2357d --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg6e16ff.c @@ -0,0 +1,82 @@ +#include +#include + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + 
return __riscv_vlseg6e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tum(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x6_t test_vlseg6e16ff_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x6_t test_vlseg6e16ff_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x6_t test_vlseg6e16ff_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg6e16ff_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg7e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg7e16.c new file mode 100644 index 000000000..4bfca3c35 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg7e16.c @@ -0,0 +1,68 @@ +#include +#include + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_tu(vd, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_tu(vd, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_tu(vd, rs1, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl); +} + 
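The suffixes exercised in these files select the tail/mask policies: `_tu` keeps tail elements from `vd` (unmasked), `_tum` adds a mask with masked-off elements left unspecified (mask-agnostic), `_tumu` keeps masked-off elements from `vd` as well, and `_mu` keeps masked-off elements while leaving the tail agnostic. A small sketch contrasting the two masked tail-undisturbed forms (hypothetical helper, assuming BF16 vector support):

#include <riscv_vector.h>
#include <stddef.h>

// _tumu keeps masked-off elements from vd; _tum leaves them unspecified
// (mask-agnostic). Both keep the tail elements from vd.
vbfloat16m1x7_t load_seg7(vbool16_t vm, vbfloat16m1x7_t vd,
                          const __bf16 *rs1, size_t vl, int keep_masked) {
  return keep_masked ? __riscv_vlseg7e16_tumu(vm, vd, rs1, vl)
                     : __riscv_vlseg7e16_tum(vm, vd, rs1, vl);
}
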
+vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x7_t test_vlseg7e16_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg7e16_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg7e16ff.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg7e16ff.c new file mode 100644 index 000000000..af9b65e7e --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg7e16ff.c @@ -0,0 +1,82 @@ +#include +#include + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tum(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x7_t test_vlseg7e16ff_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x7_t test_vlseg7e16ff_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x7_t test_vlseg7e16ff_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg7e16ff_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg8e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg8e16.c new 
file mode 100644 index 000000000..653938350 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg8e16.c @@ -0,0 +1,68 @@ +#include +#include + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_tu(vd, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_tu(vd, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_tu(vd, rs1, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_tum(vm, vd, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_tum(vm, vd, rs1, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_tumu(vm, vd, rs1, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_mu(vm, vd, rs1, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_mu(vm, vd, rs1, vl); +} + +vbfloat16m1x8_t test_vlseg8e16_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl) { + return __riscv_vlseg8e16_mu(vm, vd, rs1, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg8e16ff.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg8e16ff.c new file mode 100644 index 000000000..a4c013385 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlseg8e16ff.c @@ -0,0 +1,82 @@ +#include +#include + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_tu(vd, rs1, new_vl, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tum(vbool16_t vm, + vbfloat16m1x8_t 
vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_tum(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_tumu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf4x8_t test_vlseg8e16ff_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16mf2x8_t test_vlseg8e16ff_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl); +} + +vbfloat16m1x8_t test_vlseg8e16ff_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, + size_t *new_vl, size_t vl) { + return __riscv_vlseg8e16ff_mu(vm, vd, rs1, new_vl, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg2e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg2e16.c new file mode 100644 index 000000000..8fb0cd0fb --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg2e16.c @@ -0,0 +1,129 @@ +#include +#include + +vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_tu(vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t 
test_vlsseg2e16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vlsseg2e16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vlsseg2e16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vlsseg2e16_v_bf16m1x2_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vlsseg2e16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vlsseg2e16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg2e16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg3e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg3e16.c new file mode 100644 index 000000000..cd0bf487b --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg3e16.c @@ -0,0 +1,105 @@ +#include +#include + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); +} 
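For the strided forms, `rs2` is a byte stride between the starts of consecutive segments, so a negative stride walks backwards through memory. A sketch of loading three-field segments out of a row-interleaved buffer (helper name and layout are illustrative; assumes BF16 vector support):

#include <riscv_vector.h>
#include <stddef.h>

// Each segment holds 3 consecutive bf16 fields; consecutive segments
// start `row_bytes` apart. Tail elements keep their values from vd (_tu).
vbfloat16m1x3_t load_rows(vbfloat16m1x3_t vd, const __bf16 *base,
                          ptrdiff_t row_bytes, size_t vl) {
  return __riscv_vlsseg3e16_tu(vd, base, row_bytes, vl);
}
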
+ +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vlsseg3e16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vlsseg3e16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vlsseg3e16_v_bf16m1x3_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vlsseg3e16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg3e16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg4e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg4e16.c new file mode 100644 index 000000000..533804a3f --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg4e16.c @@ -0,0 +1,105 @@ +#include +#include + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_tu(vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_tu(vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_tum(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_tum(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_tum(vm, 
vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_tumu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_tumu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_tumu(vbool16_t vm, + vbfloat16m1x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x4_t test_vlsseg4e16_v_bf16mf4x4_mu(vbool64_t vm, + vbfloat16mf4x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x4_t test_vlsseg4e16_v_bf16mf2x4_mu(vbool32_t vm, + vbfloat16mf2x4_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x4_t test_vlsseg4e16_v_bf16m1x4_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x4_t test_vlsseg4e16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg4e16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg5e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg5e16.c new file mode 100644 index 000000000..677e6f2ec --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg5e16.c @@ -0,0 +1,81 @@ +#include +#include + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_tu(vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_tum(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_tum(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_tumu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_tumu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_tumu(vbool16_t vm, + vbfloat16m1x5_t vd, + const __bf16 *rs1, + 
ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x5_t test_vlsseg5e16_v_bf16mf4x5_mu(vbool64_t vm, + vbfloat16mf4x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x5_t test_vlsseg5e16_v_bf16mf2x5_mu(vbool32_t vm, + vbfloat16mf2x5_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x5_t test_vlsseg5e16_v_bf16m1x5_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg5e16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg6e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg6e16.c new file mode 100644 index 000000000..bdae126e0 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg6e16.c @@ -0,0 +1,81 @@ +#include +#include + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_tu(vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_tum(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_tum(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_tumu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_tumu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_tumu(vbool16_t vm, + vbfloat16m1x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x6_t test_vlsseg6e16_v_bf16mf4x6_mu(vbool64_t vm, + vbfloat16mf4x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x6_t test_vlsseg6e16_v_bf16mf2x6_mu(vbool32_t vm, + vbfloat16mf2x6_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x6_t test_vlsseg6e16_v_bf16m1x6_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg6e16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg7e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg7e16.c new file mode 100644 index 000000000..efd8b3a9d --- /dev/null +++ 
b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg7e16.c @@ -0,0 +1,81 @@ +#include +#include + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_tu(vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_tum(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_tum(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_tumu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_tumu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_tumu(vbool16_t vm, + vbfloat16m1x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x7_t test_vlsseg7e16_v_bf16mf4x7_mu(vbool64_t vm, + vbfloat16mf4x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x7_t test_vlsseg7e16_v_bf16mf2x7_mu(vbool32_t vm, + vbfloat16mf2x7_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x7_t test_vlsseg7e16_v_bf16m1x7_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg7e16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg8e16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg8e16.c new file mode 100644 index 000000000..97fd79283 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vlsseg8e16.c @@ -0,0 +1,81 @@ +#include +#include + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_tu(vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_tum(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_tum(vbool32_t vm, + vbfloat16mf2x8_t 
vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_tumu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_tumu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_tumu(vbool16_t vm, + vbfloat16m1x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x8_t test_vlsseg8e16_v_bf16mf4x8_mu(vbool64_t vm, + vbfloat16mf4x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x8_t test_vlsseg8e16_v_bf16mf2x8_mu(vbool32_t vm, + vbfloat16mf2x8_t vd, + const __bf16 *rs1, + ptrdiff_t rs2, size_t vl) { + return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x8_t test_vlsseg8e16_v_bf16m1x8_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl) { + return __riscv_vlsseg8e16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxei16.c new file mode 100644 index 000000000..226dec981 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxei16.c @@ -0,0 +1,140 @@ +#include +#include + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl) { + return __riscv_vluxei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); +} + 
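`vluxei16` is the unordered indexed gather: element i is read from the byte address `rs1 + rs2[i]`, with `rs2` holding unsigned 16-bit byte offsets. A minimal usage sketch (hypothetical helper, assuming BF16 vector support):

#include <riscv_vector.h>
#include <stddef.h>

// Gather bf16 elements at the byte offsets in `offsets`; tail elements
// keep their values from vd under the _tu policy.
vbfloat16m1_t gather_bf16(vbfloat16m1_t vd, const __bf16 *table,
                          vuint16m1_t offsets, size_t vl) {
  return __riscv_vluxei16_tu(vd, table, offsets, vl);
}
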
+vbfloat16m4_t test_vluxei16_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4_t test_vluxei16_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2_t test_vluxei16_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1_t test_vluxei16_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2_t test_vluxei16_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4_t test_vluxei16_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m8_t test_vluxei16_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl) { + return __riscv_vluxei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg2ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg2ei16.c new file mode 100644 index 000000000..dec0690d9 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg2ei16.c @@ -0,0 +1,139 @@ +#include +#include + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tu(vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tu(vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tu(vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tu(vbfloat16m2x2_t 
vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tu(vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tum(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tum(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tum(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tum(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tum(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_tumu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_tumu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_tumu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_tumu(vbool8_t vm, + vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_tumu(vbool4_t vm, + vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x2_t test_vluxseg2ei16_v_bf16mf4x2_mu(vbool64_t vm, + vbfloat16mf4x2_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x2_t test_vluxseg2ei16_v_bf16mf2x2_mu(vbool32_t vm, + vbfloat16mf2x2_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x2_t test_vluxseg2ei16_v_bf16m1x2_mu(vbool16_t vm, + vbfloat16m1x2_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x2_t test_vluxseg2ei16_v_bf16m2x2_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m4x2_t test_vluxseg2ei16_v_bf16m4x2_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, + vuint16m4_t rs2, size_t vl) { + return __riscv_vluxseg2ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg3ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg3ei16.c new file mode 100644 index 000000000..127d97bb5 --- /dev/null 
+++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg3ei16.c @@ -0,0 +1,113 @@ +#include +#include + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tu(vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tu(vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tu(vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tu(vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tu(vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tum(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tum(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tum(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tum(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tum(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_tumu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_tumu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, + size_t vl) { + return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_tumu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_tumu(vbool8_t vm, + vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_tumu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf4x3_t test_vluxseg3ei16_v_bf16mf4x3_mu(vbool64_t vm, + vbfloat16mf4x3_t vd, + const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16mf2x3_t test_vluxseg3ei16_v_bf16mf2x3_mu(vbool32_t vm, + vbfloat16mf2x3_t vd, + const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m1x3_t test_vluxseg3ei16_v_bf16m1x3_mu(vbool16_t vm, + vbfloat16m1x3_t vd, + const __bf16 *rs1, + vuint16m1_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} + +vbfloat16m2x3_t test_vluxseg3ei16_v_bf16m2x3_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, + vuint16m2_t rs2, size_t vl) { + return __riscv_vluxseg3ei16_mu(vm, vd, rs1, rs2, vl); +} diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg4ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg4ei16.c new file mode 100644 index 000000000..387738336 --- /dev/null +++ 
b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg4ei16.c
@@ -0,0 +1,113 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tu(vbfloat16mf4x4_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tu(vbfloat16mf2x4_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tu(vbfloat16m1x4_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tu(vbfloat16m2x4_t vd,
+    const __bf16 *rs1,
+    vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tum(vbool64_t vm,
+    vbfloat16mf4x4_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tum(vbool32_t vm,
+    vbfloat16mf2x4_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tum(vbool16_t vm,
+    vbfloat16m1x4_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tum(vbool8_t vm,
+    vbfloat16m2x4_t vd,
+    const __bf16 *rs1,
+    vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_tumu(vbool64_t vm,
+    vbfloat16mf4x4_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_tumu(vbool32_t vm,
+    vbfloat16mf2x4_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_tumu(vbool16_t vm,
+    vbfloat16m1x4_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_tumu(vbool8_t vm,
+    vbfloat16m2x4_t vd,
+    const __bf16 *rs1,
+    vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x4_t test_vluxseg4ei16_v_bf16mf4x4_mu(vbool64_t vm,
+    vbfloat16mf4x4_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x4_t test_vluxseg4ei16_v_bf16mf2x4_mu(vbool32_t vm,
+    vbfloat16mf2x4_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x4_t test_vluxseg4ei16_v_bf16m1x4_mu(vbool16_t vm,
+    vbfloat16m1x4_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m2x4_t test_vluxseg4ei16_v_bf16m2x4_mu(vbool8_t vm, vbfloat16m2x4_t vd,
+    const __bf16 *rs1,
+    vuint16m2_t rs2, size_t vl) {
+  return __riscv_vluxseg4ei16_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg5ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg5ei16.c
new file mode 100644
index 000000000..e44715aab
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg5ei16.c
@@ -0,0 +1,87 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tu(vbfloat16mf4x5_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tu(vbfloat16mf2x5_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tu(vbfloat16m1x5_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tum(vbool64_t vm,
+    vbfloat16mf4x5_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tum(vbool32_t vm,
+    vbfloat16mf2x5_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tum(vbool16_t vm,
+    vbfloat16m1x5_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_tumu(vbool64_t vm,
+    vbfloat16mf4x5_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_tumu(vbool32_t vm,
+    vbfloat16mf2x5_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_tumu(vbool16_t vm,
+    vbfloat16m1x5_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x5_t test_vluxseg5ei16_v_bf16mf4x5_mu(vbool64_t vm,
+    vbfloat16mf4x5_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x5_t test_vluxseg5ei16_v_bf16mf2x5_mu(vbool32_t vm,
+    vbfloat16mf2x5_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x5_t test_vluxseg5ei16_v_bf16m1x5_mu(vbool16_t vm,
+    vbfloat16m1x5_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg5ei16_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg6ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg6ei16.c
new file mode 100644
index 000000000..86655a32a
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg6ei16.c
@@ -0,0 +1,87 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tu(vbfloat16mf4x6_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tu(vbfloat16mf2x6_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tu(vbfloat16m1x6_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tum(vbool64_t vm,
+    vbfloat16mf4x6_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tum(vbool32_t vm,
+    vbfloat16mf2x6_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tum(vbool16_t vm,
+    vbfloat16m1x6_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_tumu(vbool64_t vm,
+    vbfloat16mf4x6_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_tumu(vbool32_t vm,
+    vbfloat16mf2x6_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_tumu(vbool16_t vm,
+    vbfloat16m1x6_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x6_t test_vluxseg6ei16_v_bf16mf4x6_mu(vbool64_t vm,
+    vbfloat16mf4x6_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x6_t test_vluxseg6ei16_v_bf16mf2x6_mu(vbool32_t vm,
+    vbfloat16mf2x6_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x6_t test_vluxseg6ei16_v_bf16m1x6_mu(vbool16_t vm,
+    vbfloat16m1x6_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg6ei16_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg7ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg7ei16.c
new file mode 100644
index 000000000..f0473d13d
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg7ei16.c
@@ -0,0 +1,87 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tu(vbfloat16mf4x7_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tu(vbfloat16mf2x7_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tu(vbfloat16m1x7_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tum(vbool64_t vm,
+    vbfloat16mf4x7_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tum(vbool32_t vm,
+    vbfloat16mf2x7_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tum(vbool16_t vm,
+    vbfloat16m1x7_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_tumu(vbool64_t vm,
+    vbfloat16mf4x7_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_tumu(vbool32_t vm,
+    vbfloat16mf2x7_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_tumu(vbool16_t vm,
+    vbfloat16m1x7_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x7_t test_vluxseg7ei16_v_bf16mf4x7_mu(vbool64_t vm,
+    vbfloat16mf4x7_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x7_t test_vluxseg7ei16_v_bf16mf2x7_mu(vbool32_t vm,
+    vbfloat16mf2x7_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x7_t test_vluxseg7ei16_v_bf16m1x7_mu(vbool16_t vm,
+    vbfloat16m1x7_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg7ei16_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg8ei16.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg8ei16.c
new file mode 100644
index 000000000..07ed8156f
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vluxseg8ei16.c
@@ -0,0 +1,87 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tu(vbfloat16mf4x8_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tu(vbfloat16mf2x8_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tu(vbfloat16m1x8_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_tu(vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tum(vbool64_t vm,
+    vbfloat16mf4x8_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tum(vbool32_t vm,
+    vbfloat16mf2x8_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tum(vbool16_t vm,
+    vbfloat16m1x8_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_tum(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_tumu(vbool64_t vm,
+    vbfloat16mf4x8_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_tumu(vbool32_t vm,
+    vbfloat16mf2x8_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2,
+    size_t vl) {
+  return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_tumu(vbool16_t vm,
+    vbfloat16m1x8_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_tumu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf4x8_t test_vluxseg8ei16_v_bf16mf4x8_mu(vbool64_t vm,
+    vbfloat16mf4x8_t vd,
+    const __bf16 *rs1,
+    vuint16mf4_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16mf2x8_t test_vluxseg8ei16_v_bf16mf2x8_mu(vbool32_t vm,
+    vbfloat16mf2x8_t vd,
+    const __bf16 *rs1,
+    vuint16mf2_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl);
+}
+
+vbfloat16m1x8_t test_vluxseg8ei16_v_bf16m1x8_mu(vbool16_t vm,
+    vbfloat16m1x8_t vd,
+    const __bf16 *rs1,
+    vuint16m1_t rs2, size_t vl) {
+  return __riscv_vluxseg8ei16_mu(vm, vd, rs1, rs2, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vmerge.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vmerge.c
new file mode 100644
index 000000000..73c4cffb7
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vmerge.c
@@ -0,0 +1,38 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+vbfloat16mf4_t test_vmerge_vvm_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs2,
+    vbfloat16mf4_t vs1, vbool64_t v0,
+    size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
+}
+
+vbfloat16mf2_t test_vmerge_vvm_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs2,
+    vbfloat16mf2_t vs1, vbool32_t v0,
+    size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
+}
+
+vbfloat16m1_t test_vmerge_vvm_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2,
+    vbfloat16m1_t vs1, vbool16_t v0,
+    size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
+}
+
+vbfloat16m2_t test_vmerge_vvm_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2,
+    vbfloat16m2_t vs1, vbool8_t v0,
+    size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
+}
+
+vbfloat16m4_t test_vmerge_vvm_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2,
+    vbfloat16m4_t vs1, vbool4_t v0,
+    size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
+}
+
+vbfloat16m8_t test_vmerge_vvm_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2,
+    vbfloat16m8_t vs1, vbool2_t v0,
+    size_t vl) {
+  return __riscv_vmerge_tu(vd, vs2, vs1, v0, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vmv.c b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vmv.c
new file mode 100644
index 000000000..dfc0514fc
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/overloaded-api-testing/vmv.c
@@ -0,0 +1,32 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+vbfloat16mf4_t test_vmv_v_v_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1,
+    size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
+}
+
+vbfloat16mf2_t test_vmv_v_v_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1,
+    size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
+}
+
+vbfloat16m1_t test_vmv_v_v_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1,
+    size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
+}
+
+vbfloat16m2_t test_vmv_v_v_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1,
+    size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
+}
+
+vbfloat16m4_t test_vmv_v_v_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1,
+    size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
+}
+
+vbfloat16m8_t test_vmv_v_v_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1,
+    size_t vl) {
+  return __riscv_vmv_v_tu(vd, vs1, vl);
+}
diff --git a/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs.adoc b/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs.adoc
new file mode 100644
index 000000000..e4259bce4
--- /dev/null
+++ b/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs.adoc
@@ -0,0 +1,2134 @@
+
+=== BFloat16 Vector Loads and Stores Intrinsics
+
+[[policy-variant-overloadedbf16-vector-unit-stride-load]]
+==== Vector Unit-Stride Load Intrinsics
+
+[,c]
+----
+vbfloat16mf4_t __riscv_vle16_tu(vbfloat16mf4_t vd, const __bf16 *rs1,
+                                size_t vl);
+vbfloat16mf2_t __riscv_vle16_tu(vbfloat16mf2_t vd, const __bf16 *rs1,
+                                size_t vl);
+vbfloat16m1_t __riscv_vle16_tu(vbfloat16m1_t vd, const __bf16 *rs1, size_t vl);
+vbfloat16m2_t __riscv_vle16_tu(vbfloat16m2_t vd, const __bf16 *rs1, size_t vl);
+vbfloat16m4_t __riscv_vle16_tu(vbfloat16m4_t vd, const __bf16 *rs1, size_t vl);
+vbfloat16m8_t __riscv_vle16_tu(vbfloat16m8_t
vd, const __bf16 *rs1, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_mu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m4_t __riscv_vle16_mu(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m8_t __riscv_vle16_mu(vbool2_t vm, vbfloat16m8_t vd, const __bf16 *rs1, + size_t vl); +---- + +[[policy-variant-overloadedbf16-vector-unit-stride-store]] +==== Vector Unit-Stride Store Intrinsics +Intrinsics here don't have a policy variant. 
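+
+For orientation, a minimal caller sketch of the unit-stride loads above
+(an illustrative addition, not generated output; the helper name
+`load_prefix` is hypothetical, and a toolchain with bf16 vector support
+is assumed):
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Hypothetical helper: overwrite only the first `vl` elements of `vd`.
+// With the `_tu` (tail-undisturbed) policy, elements past `vl` keep the
+// values they had in `vd` instead of becoming agnostic.
+vbfloat16m1_t load_prefix(vbfloat16m1_t vd, const __bf16 *src, size_t vl) {
+  // The overloaded `_tu` form resolves on the type of `vd`.
+  return __riscv_vle16_tu(vd, src, vl);
+}
+----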
+ +[[policy-variant-overloadedvector-strided-load]] +==== Vector Strided Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vlse16_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vlse16_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1_t __riscv_vlse16_tu(vbfloat16m1_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2_t __riscv_vlse16_tu(vbfloat16m2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4_t __riscv_vlse16_tu(vbfloat16m4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m8_t __riscv_vlse16_tu(vbfloat16m8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vlse16_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m1_t __riscv_vlse16_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m2_t __riscv_vlse16_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m4_t __riscv_vlse16_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m8_t __riscv_vlse16_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vlse16_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m1_t __riscv_vlse16_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m2_t __riscv_vlse16_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m4_t __riscv_vlse16_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m8_t __riscv_vlse16_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vlse16_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m1_t __riscv_vlse16_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m2_t __riscv_vlse16_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m4_t __riscv_vlse16_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m8_t __riscv_vlse16_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +---- + +[[policy-variant-overloadedvector-strided-store]] +==== Vector Strided Store Intrinsics +Intrinsics here don't have a policy variant. 
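+
+As an illustrative sketch (not generated output; the helper name
+`load_even` is hypothetical): the `rs2` argument of the strided loads
+above is a stride in bytes, so stepping over one extra `__bf16` element
+per load uses a stride of `2 * sizeof(__bf16)`:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Hypothetical helper: read the even-indexed elements of an interleaved
+// {a, b, a, b, ...} bf16 buffer, tail-undisturbed.
+vbfloat16m1_t load_even(vbfloat16m1_t vd, const __bf16 *buf, size_t vl) {
+  return __riscv_vlse16_tu(vd, buf, 2 * sizeof(__bf16), vl);
+}
+----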
+ +[[policy-variant-overloadedvector-indexed-load]] +==== Vector Indexed Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vloxei16_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vloxei16_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vloxei16_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vloxei16_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl); +vbfloat16mf4_t __riscv_vluxei16_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vluxei16_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vluxei16_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vluxei16_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vloxei16_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vloxei16_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vluxei16_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vluxei16_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vloxei16_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vloxei16_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, 
+ size_t vl); +vbfloat16mf4_t __riscv_vluxei16_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vluxei16_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vluxei16_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vloxei16_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vloxei16_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vluxei16_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vluxei16_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +---- + +[[policy-variant-overloadedvector-indexed-store]] +==== Vector Indexed Store Intrinsics +Intrinsics here don't have a policy variant. 
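+
+An illustrative sketch (not generated output; `gather_bf16` is a
+hypothetical helper): the index vector `rs2` of the indexed loads above
+holds unsigned byte offsets from `rs1`, and `vloxei16`/`vluxei16` differ
+only in whether the element accesses are ordered:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Hypothetical gather: `offsets` are byte offsets from `base`. Under the
+// `_tum` policy the tail keeps its `vd` values while masked-off elements
+// are agnostic; the `_tumu` form also keeps masked-off elements.
+vbfloat16m1_t gather_bf16(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *base,
+                          vuint16m1_t offsets, size_t vl) {
+  return __riscv_vluxei16_tum(vm, vd, base, offsets, vl);
+}
+----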
+ +[[policy-variant-overloadedunit-stride-fault-only-first-loads]] +==== Unit-stride Fault-Only-First Loads Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vle16ff_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2_t __riscv_vle16ff_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1_t __riscv_vle16ff_tu(vbfloat16m1_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2_t __riscv_vle16ff_tu(vbfloat16m2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4_t __riscv_vle16ff_tu(vbfloat16m4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m8_t __riscv_vle16ff_tu(vbfloat16m8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m2_t __riscv_vle16ff_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m4_t __riscv_vle16ff_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m8_t __riscv_vle16ff_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2_t __riscv_vle16ff_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4_t __riscv_vle16ff_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m8_t __riscv_vle16ff_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16mf2_t __riscv_vle16ff_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m1_t __riscv_vle16ff_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m2_t __riscv_vle16ff_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m4_t __riscv_vle16ff_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m8_t __riscv_vle16ff_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +---- + +=== BFloat16 Vector Loads and Stores Segment Intrinsics + +[[policy-variant-overloadedvector-unit-stride-segment-load]] +==== Vector Unit-Stride Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vlseg2e16_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_tu(vbfloat16mf4x6_t vd, const __bf16 
*rs1, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t 
__riscv_vlseg3e16ff_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_tum(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_tum(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_tum(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_tum(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_tum(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_tum(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_tum(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_tum(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_tum(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_tum(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_tum(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_tum(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_tum(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_tum(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t 
vl); +vbfloat16m2x4_t __riscv_vlseg4e16_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_tum(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_tum(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_tum(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_tum(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_tum(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_tum(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_tum(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_tum(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_tum(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_tum(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_tum(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_tum(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_tum(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_tum(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t 
__riscv_vlseg3e16_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, + 
const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_mu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_mu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_mu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_mu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_mu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_mu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_mu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_mu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_mu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_mu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_mu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_mu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t 
__riscv_vlseg7e16_mu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_mu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_mu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_mu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_mu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_mu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_mu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_mu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_mu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_mu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_mu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_mu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_mu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_mu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_mu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_mu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_mu(vbool16_t vm, 
vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +---- + +[[policy-variant-overloadedvecrtor-unit-stride-segment-store]] +==== Vector Unit-Stride Segment Store Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-strided-segment-load]] +==== Vector Strided Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vlsseg2e16_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t 
__riscv_vlsseg2e16_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_tum(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_tum(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_tum(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_tum(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_tum(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_tum(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_tum(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_tum(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_tum(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_tum(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_tum(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_tum(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_tum(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_tum(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t 
vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_mu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_mu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_mu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, ptrdiff_t 
rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_mu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_mu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_mu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_mu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_mu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_mu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_mu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_mu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_mu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_mu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_mu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +---- + +[[policy-variant-overloadedvector-strided-segment-store]] +==== Vector Strided Segment Store Intrinsics +Intrinsics here don't have a policy variant. 
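+
+Before moving on to the indexed forms, a minimal usage sketch for the strided
+segment loads listed above (the helper name and data layout are illustrative
+assumptions; it presumes a toolchain with Zvfbfmin support and
+`<riscv_vector.h>`):
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Hypothetical helper: load `n` packed {re, im} bf16 pairs, de-interleaving
+// them into the two fields of a tuple. Tail elements of both fields keep
+// their previous values from `acc` (tail-undisturbed, _tu).
+vbfloat16m1x2_t load_complex_tu(vbfloat16m1x2_t acc,
+                                const __bf16 *interleaved, size_t n) {
+  size_t vl = __riscv_vsetvl_e16m1(n);
+  // The byte stride between consecutive segments equals the segment size,
+  // 2 * sizeof(__bf16), i.e. the pairs are fully packed in memory.
+  return __riscv_vlsseg2e16_tu(acc, interleaved,
+                               (ptrdiff_t)(2 * sizeof(__bf16)), vl);
+}
+----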
+ +[[policy-variant-overloadedvector-indexed-segment-load]] +==== Vector Indexed Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vloxseg2ei16_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x7_t 
__riscv_vluxseg7ei16_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_tum(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_tum(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_tum(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_tum(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_tum(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_tum(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_tum(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_tum(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_tum(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_tum(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t 
vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_tum(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_tum(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_tum(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_tum(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_tum(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_tum(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_tum(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_tum(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_tum(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_tum(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_tum(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_tum(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_tum(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_tum(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_tum(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_tum(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t 
__riscv_vluxseg7ei16_tum(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_tum(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t 
__riscv_vloxseg2ei16_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t 
__riscv_vluxseg4ei16_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_mu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_mu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_mu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_mu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_mu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_mu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_mu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_mu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_mu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_mu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_mu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_mu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_mu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_mu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_mu(vbool16_t vm, 
vbfloat16m1x6_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_mu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_mu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_mu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_mu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_mu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_mu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_mu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_mu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_mu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_mu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_mu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_mu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_mu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_mu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); 
+vbfloat16m2x2_t __riscv_vluxseg2ei16_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +---- + +[[policy-variant-overloadedvector-indexed-segment-store]] +==== Vector Indexed Segment Store Intrinsics +Intrinsics here don't have a policy variant. + +=== BFloat16 Convert Intrinsics + +[[policy-variant-overloadedbf16-vector-narrow-convert]] +==== Vector Narrowing Convert Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vfncvtbf16_f_tu(vbfloat16mf4_t vd, vfloat32mf2_t vs2, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_tu(vbfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_tu(vbfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_tu(vbfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_tum(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_tum(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +vbfloat16mf4_t __riscv_vfncvtbf16_f_tu(vbfloat16mf4_t vd, vfloat32mf2_t vs2, + unsigned int frm, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2, + unsigned int frm, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_tu(vbfloat16m1_t vd, vfloat32m2_t vs2, + unsigned int frm, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_tu(vbfloat16m2_t vd, vfloat32m4_t vs2, + unsigned int frm, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_tu(vbfloat16m4_t vd, vfloat32m8_t vs2, + unsigned int frm, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_tum(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, 
unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_tum(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, unsigned int frm, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, unsigned int frm, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, unsigned int frm, + size_t vl); +---- + +[[policy-variant-overloadedbf16-vector-widening-convert]] +==== Vector Widening Convert Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwcvtbf16_f_tu(vfloat32mf2_t vd, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_tu(vfloat32m1_t vd, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_mu(vbool64_t vm, vfloat32mf2_t vd, 
+ vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +---- + +=== BFloat16 Arithmetic Intrinsics + +[[policy-variant-overloadedbf16-widening-multiply-accumulate]] +==== Vector Widening Multiply-Accumulate Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwmaccbf16_tu(vfloat32mf2_t vd, vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tu(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tum(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tum(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tum(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tum(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tumu(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tumu(vbool16_t vm, 
vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tumu(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tumu(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_mu(vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_mu(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_mu(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_mu(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_mu(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tu(vfloat32mf2_t vd, vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, unsigned int frm, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, unsigned int frm, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tu(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tum(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); 
+vfloat32m2_t __riscv_vfwmaccbf16_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tum(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tum(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tum(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tumu(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tumu(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tumu(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tumu(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_mu(vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, unsigned int frm, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_mu(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_mu(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_mu(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_mu(vbool4_t vm, 
vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +---- + +[[policy-variant-overloadedvector-bf16-move]] +==== Vector BFloat16 Move Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmv_v_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, + size_t vl); +vbfloat16mf2_t __riscv_vmv_v_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, + size_t vl); +vbfloat16m1_t __riscv_vmv_v_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, size_t vl); +vbfloat16m2_t __riscv_vmv_v_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, size_t vl); +vbfloat16m4_t __riscv_vmv_v_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, size_t vl); +vbfloat16m8_t __riscv_vmv_v_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, size_t vl); +---- + +[[policy-variant-overloadedvector-bf16-merge]] +==== Vector BFloat16 Merge Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmerge_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, vbool64_t v0, size_t vl); +vbfloat16mf2_t __riscv_vmerge_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, vbool32_t v0, size_t vl); +vbfloat16m1_t __riscv_vmerge_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, vbool16_t v0, size_t vl); +vbfloat16m2_t __riscv_vmerge_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, vbool8_t v0, size_t vl); +vbfloat16m4_t __riscv_vmerge_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, vbool4_t v0, size_t vl); +vbfloat16m8_t __riscv_vmerge_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2, + vbfloat16m8_t vs1, vbool2_t v0, size_t vl); +---- + +=== BFloat16 Miscellaneous Vector Utility Intrinsics + +[[policy-variant-overloadedreinterpret-cast-conversion]] +==== Reinterpret Cast Conversion Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-lmul-extensionn]] +==== Vector LMUL Extension Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-lmul-truncation]] +==== Vector LMUL Truncation Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-initialization]] +==== Vector Initialization Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-insertion]] +==== Vector Insertion Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-extraction]] +==== Vector Extraction Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-creation]] +==== Vector Creation Intrinsics +Intrinsics here don't have a policy variant. 
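+
+To close out this file, a hedged sketch tying the arithmetic listings above
+together (the function name is illustrative): one step of a bf16 dot product
+that widens into an f32 accumulator under the tail-undisturbed policy.
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Hypothetical helper: acc[i] += a[i] * b[i] for the first `vl` lanes,
+// widening bf16 products into the f32 accumulator; tail lanes of `acc`
+// are left undisturbed (_tu). Assumes a toolchain with Zvfbfwma support.
+vfloat32m2_t dot_step_tu(vfloat32m2_t acc, vbfloat16m1_t a, vbfloat16m1_t b,
+                         size_t vl) {
+  return __riscv_vfwmaccbf16_tu(acc, a, b, vl);
+}
+----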
diff --git a/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/00_bfloat16_vector_loads_and_stores_intrinsics.adoc b/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/00_bfloat16_vector_loads_and_stores_intrinsics.adoc new file mode 100644 index 000000000..17fec1b34 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/00_bfloat16_vector_loads_and_stores_intrinsics.adoc @@ -0,0 +1,334 @@ + +=== BFloat16 Vector Loads and Stores Intrinsics + +[[policy-variant-overloadedbf16-vector-unit-stride-load]] +==== Vector Unit-Stride Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vle16_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2_t __riscv_vle16_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1_t __riscv_vle16_tu(vbfloat16m1_t vd, const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_tu(vbfloat16m2_t vd, const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_tu(vbfloat16m4_t vd, const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_tu(vbfloat16m8_t vd, const __bf16 *rs1, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4_t __riscv_vle16_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m8_t __riscv_vle16_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2_t __riscv_vle16_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1_t __riscv_vle16_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2_t __riscv_vle16_mu(vbool8_t vm, vbfloat16m2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m4_t __riscv_vle16_mu(vbool4_t vm, vbfloat16m4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m8_t __riscv_vle16_mu(vbool2_t vm, vbfloat16m8_t vd, const __bf16 *rs1, + size_t vl); +---- + +[[policy-variant-overloadedbf16-vector-unit-stride-store]] +==== Vector Unit-Stride Store Intrinsics +Intrinsics here don't have a policy variant. 
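+
+A minimal usage sketch for the unit-stride loads above (the helper name is an
+assumption; it requires a compiler with Zvfbfmin support):
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Hypothetical helper: overwrite the first `n` lanes of `acc` from memory
+// while leaving the tail lanes untouched (tail-undisturbed, _tu). The
+// overloaded form selects the vbfloat16m1_t variant from the type of `acc`.
+vbfloat16m1_t refresh_head_tu(vbfloat16m1_t acc, const __bf16 *src,
+                              size_t n) {
+  size_t vl = __riscv_vsetvl_e16m1(n);
+  return __riscv_vle16_tu(acc, src, vl);
+}
+----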
+ +[[policy-variant-overloadedvector-strided-load]] +==== Vector Strided Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vlse16_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vlse16_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1_t __riscv_vlse16_tu(vbfloat16m1_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2_t __riscv_vlse16_tu(vbfloat16m2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4_t __riscv_vlse16_tu(vbfloat16m4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m8_t __riscv_vlse16_tu(vbfloat16m8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vlse16_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m1_t __riscv_vlse16_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m2_t __riscv_vlse16_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m4_t __riscv_vlse16_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m8_t __riscv_vlse16_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vlse16_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m1_t __riscv_vlse16_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m2_t __riscv_vlse16_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m4_t __riscv_vlse16_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m8_t __riscv_vlse16_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vlse16_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vlse16_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m1_t __riscv_vlse16_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m2_t __riscv_vlse16_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m4_t __riscv_vlse16_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +vbfloat16m8_t __riscv_vlse16_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, size_t vl); +---- + +[[policy-variant-overloadedvector-strided-store]] +==== Vector Strided Store Intrinsics +Intrinsics here don't have a policy variant. 
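+
+A short sketch of the strided form (the matrix layout and helper name are
+illustrative assumptions):
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Hypothetical helper: gather one column of a row-major bf16 matrix.
+// The stride argument is in bytes, so consecutive elements of a column
+// are `row_bytes` apart; tail lanes of `acc` stay undisturbed (_tu).
+vbfloat16m1_t load_column_tu(vbfloat16m1_t acc, const __bf16 *column_start,
+                             size_t row_bytes, size_t rows) {
+  size_t vl = __riscv_vsetvl_e16m1(rows);
+  return __riscv_vlse16_tu(acc, column_start, (ptrdiff_t)row_bytes, vl);
+}
+----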
+ +[[policy-variant-overloadedvector-indexed-load]] +==== Vector Indexed Load Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vloxei16_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vloxei16_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vloxei16_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vloxei16_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vloxei16_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vloxei16_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl); +vbfloat16mf4_t __riscv_vluxei16_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2_t __riscv_vluxei16_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1_t __riscv_vluxei16_tu(vbfloat16m1_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2_t __riscv_vluxei16_tu(vbfloat16m2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4_t __riscv_vluxei16_tu(vbfloat16m4_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16m8_t __riscv_vluxei16_tu(vbfloat16m8_t vd, const __bf16 *rs1, + vuint16m8_t rs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vloxei16_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vloxei16_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vluxei16_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vluxei16_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vloxei16_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vloxei16_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, 
+ size_t vl); +vbfloat16mf4_t __riscv_vluxei16_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vluxei16_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vluxei16_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vloxei16_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vloxei16_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vloxei16_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vloxei16_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vloxei16_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vloxei16_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +vbfloat16mf4_t __riscv_vluxei16_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2_t __riscv_vluxei16_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1_t __riscv_vluxei16_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2_t __riscv_vluxei16_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4_t __riscv_vluxei16_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16m8_t __riscv_vluxei16_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, vuint16m8_t rs2, + size_t vl); +---- + +[[policy-variant-overloadedvector-indexed-store]] +==== Vector Indexed Store Intrinsics +Intrinsics here don't have a policy variant. 
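+
+A minimal usage sketch for the indexed loads and stores above, assuming a
+toolchain that provides `<riscv_vector.h>` with the bfloat16 vector types
+enabled (e.g. via the `zvfbfmin` extension); the function and variable names
+are illustrative. Index elements hold byte offsets, and the `_tumu` variant
+keeps both tail and masked-off elements from `vd`:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Gather vl bf16 values from src at element indices idx[i]; inactive and
+// tail elements keep their previous values from vd (tail/mask undisturbed).
+vbfloat16m1_t gather_bf16(vbool16_t vm, vbfloat16m1_t vd, const __bf16 *src,
+                          vuint16m1_t idx, size_t vl) {
+  // Indexed loads take byte offsets, so scale the element indices by
+  // sizeof(__bf16) (assumes idx[i] < 32768 so the offset fits in 16 bits).
+  vuint16m1_t offs = __riscv_vsll(idx, 1, vl);
+  return __riscv_vloxei16_tumu(vm, vd, src, offs, vl);
+}
+
+// Scatter uses the plain overloaded form, since stores have no policy variant.
+void scatter_bf16(__bf16 *dst, vuint16m1_t offs, vbfloat16m1_t vs, size_t vl) {
+  __riscv_vsoxei16(dst, offs, vs, vl);
+}
+----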
+ +[[policy-variant-overloadedunit-stride-fault-only-first-loads]] +==== Unit-stride Fault-Only-First Loads Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vle16ff_tu(vbfloat16mf4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2_t __riscv_vle16ff_tu(vbfloat16mf2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1_t __riscv_vle16ff_tu(vbfloat16m1_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2_t __riscv_vle16ff_tu(vbfloat16m2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4_t __riscv_vle16ff_tu(vbfloat16m4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m8_t __riscv_vle16ff_tu(vbfloat16m8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_tum(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_tum(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_tum(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m2_t __riscv_vle16ff_tum(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m4_t __riscv_vle16ff_tum(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m8_t __riscv_vle16ff_tum(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_tumu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2_t __riscv_vle16ff_tumu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1_t __riscv_vle16ff_tumu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2_t __riscv_vle16ff_tumu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4_t __riscv_vle16ff_tumu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m8_t __riscv_vle16ff_tumu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vle16ff_mu(vbool64_t vm, vbfloat16mf4_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16mf2_t __riscv_vle16ff_mu(vbool32_t vm, vbfloat16mf2_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m1_t __riscv_vle16ff_mu(vbool16_t vm, vbfloat16m1_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m2_t __riscv_vle16ff_mu(vbool8_t vm, vbfloat16m2_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m4_t __riscv_vle16ff_mu(vbool4_t vm, vbfloat16m4_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +vbfloat16m8_t __riscv_vle16ff_mu(vbool2_t vm, vbfloat16m8_t vd, + const __bf16 *rs1, size_t *new_vl, size_t vl); +---- diff --git a/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/01_bfloat16_vector_loads_and_stores_segment_intrinsics.adoc b/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/01_bfloat16_vector_loads_and_stores_segment_intrinsics.adoc new file mode 100644 index 000000000..507b4155e --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/01_bfloat16_vector_loads_and_stores_segment_intrinsics.adoc @@ -0,0 +1,1344 @@ + +=== BFloat16 Vector Loads and Stores Segment Intrinsics + +[[policy-variant-overloadedvector-unit-stride-segment-load]] +==== Vector Unit-Stride Segment Load 
Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vlseg2e16_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, + size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); 
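+// Note: the *ff (fault-only-first) segment loads may complete with a shorter
+// vector length if an element after the first would trap; the updated vector
+// length is written back through new_vl.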
+vbfloat16mf2x6_t __riscv_vlseg6e16ff_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, + size_t *new_vl, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_tum(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_tum(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_tum(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_tum(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_tum(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_tum(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_tum(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_tum(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_tum(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_tum(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_tum(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_tum(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_tum(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_tum(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_tum(vbool16_t vm, 
vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_tum(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_tum(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_tum(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_tum(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_tum(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_tum(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_tum(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_tum(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_tum(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_tum(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_tum(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_tum(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_tum(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_tum(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_tum(vbool8_t vm, vbfloat16m2x3_t 
vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); 
+vbfloat16mf4x6_t __riscv_vlseg6e16ff_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlseg2e16_mu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16_mu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16_mu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16_mu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16_mu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16_mu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16_mu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16_mu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t 
vl); +vbfloat16mf2x3_t __riscv_vlseg3e16_mu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16_mu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16_mu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16_mu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16_mu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16_mu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t vl); +vbfloat16mf4x2_t __riscv_vlseg2e16ff_mu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x3_t __riscv_vlseg3e16ff_mu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x4_t __riscv_vlseg4e16ff_mu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x5_t __riscv_vlseg5e16ff_mu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x6_t __riscv_vlseg6e16ff_mu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x7_t __riscv_vlseg7e16ff_mu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf4x8_t __riscv_vlseg8e16ff_mu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x2_t __riscv_vlseg2e16ff_mu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x3_t __riscv_vlseg3e16ff_mu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x4_t __riscv_vlseg4e16ff_mu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x5_t __riscv_vlseg5e16ff_mu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x6_t __riscv_vlseg6e16ff_mu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x7_t __riscv_vlseg7e16ff_mu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16mf2x8_t __riscv_vlseg8e16ff_mu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, size_t 
*new_vl, + size_t vl); +vbfloat16m1x2_t __riscv_vlseg2e16ff_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x3_t __riscv_vlseg3e16ff_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x4_t __riscv_vlseg4e16ff_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x5_t __riscv_vlseg5e16ff_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x6_t __riscv_vlseg6e16ff_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x7_t __riscv_vlseg7e16ff_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m1x8_t __riscv_vlseg8e16ff_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x2_t __riscv_vlseg2e16ff_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x3_t __riscv_vlseg3e16ff_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m2x4_t __riscv_vlseg4e16ff_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +vbfloat16m4x2_t __riscv_vlseg2e16ff_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, size_t *new_vl, + size_t vl); +---- + +[[policy-variant-overloadedvecrtor-unit-stride-segment-store]] +==== Vector Unit-Stride Segment Store Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-strided-segment-load]] +==== Vector Strided Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vlsseg2e16_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x5_t 
__riscv_vlsseg5e16_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, + ptrdiff_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_tum(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_tum(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_tum(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_tum(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_tum(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_tum(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_tum(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_tum(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_tum(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_tum(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_tum(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_tum(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_tum(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_tum(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_tum(vbool8_t vm, 
vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_tumu(vbool4_t vm, vbfloat16m4x2_t 
vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vlsseg2e16_mu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vlsseg3e16_mu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vlsseg4e16_mu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vlsseg5e16_mu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vlsseg6e16_mu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vlsseg7e16_mu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vlsseg8e16_mu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vlsseg2e16_mu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vlsseg3e16_mu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vlsseg4e16_mu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vlsseg5e16_mu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vlsseg6e16_mu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vlsseg7e16_mu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vlsseg8e16_mu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vlsseg2e16_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vlsseg3e16_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vlsseg4e16_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vlsseg5e16_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vlsseg6e16_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vlsseg7e16_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vlsseg8e16_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vlsseg2e16_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vlsseg3e16_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vlsseg4e16_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vlsseg2e16_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, ptrdiff_t rs2, + size_t vl); +---- + +[[policy-variant-overloadedvector-strided-segment-store]] +==== Vector Strided Segment Store Intrinsics +Intrinsics here don't have a policy variant. 
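+
+A short sketch of the strided segment loads above, under the same toolchain
+assumptions as the earlier example; the record layout and names are
+hypothetical. With a byte stride equal to the record size, `vlsseg2e16`
+splits the two bf16 fields of each record into separate registers of the
+result tuple, skipping the rest of the record:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+typedef struct { __bf16 re, im; float weight; } sample; // 8-byte record
+
+// Load the re/im fields of vl records starting at base, skipping the weight
+// field; tail elements of both result registers keep their values from vd
+// (tail undisturbed).
+vbfloat16m1x2_t load_re_im_tu(vbfloat16m1x2_t vd, const sample *base,
+                              size_t vl) {
+  return __riscv_vlsseg2e16_tu(vd, &base->re, (ptrdiff_t)sizeof(sample), vl);
+}
+----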
+ +[[policy-variant-overloadedvector-indexed-segment-load]] +==== Vector Indexed Segment Load Intrinsics + +[,c] +---- +vbfloat16mf4x2_t __riscv_vloxseg2ei16_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_tu(vbfloat16mf4x2_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_tu(vbfloat16mf4x3_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_tu(vbfloat16mf4x4_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_tu(vbfloat16mf4x5_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_tu(vbfloat16mf4x6_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x7_t 
__riscv_vluxseg7ei16_tu(vbfloat16mf4x7_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_tu(vbfloat16mf4x8_t vd, const __bf16 *rs1, + vuint16mf4_t rs2, size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_tu(vbfloat16mf2x2_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_tu(vbfloat16mf2x3_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_tu(vbfloat16mf2x4_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_tu(vbfloat16mf2x5_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_tu(vbfloat16mf2x6_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_tu(vbfloat16mf2x7_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_tu(vbfloat16mf2x8_t vd, const __bf16 *rs1, + vuint16mf2_t rs2, size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_tu(vbfloat16m1x2_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_tu(vbfloat16m1x3_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_tu(vbfloat16m1x4_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_tu(vbfloat16m1x5_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_tu(vbfloat16m1x6_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_tu(vbfloat16m1x7_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_tu(vbfloat16m1x8_t vd, const __bf16 *rs1, + vuint16m1_t rs2, size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_tu(vbfloat16m2x2_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_tu(vbfloat16m2x3_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_tu(vbfloat16m2x4_t vd, const __bf16 *rs1, + vuint16m2_t rs2, size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_tu(vbfloat16m4x2_t vd, const __bf16 *rs1, + vuint16m4_t rs2, size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_tum(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_tum(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_tum(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_tum(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_tum(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_tum(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_tum(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_tum(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_tum(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_tum(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t 
vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_tum(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_tum(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_tum(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_tum(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_tum(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_tum(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_tum(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_tum(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_tum(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_tum(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_tum(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_tum(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_tum(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_tum(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_tum(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_tum(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t 
__riscv_vluxseg7ei16_tum(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_tum(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_tum(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_tum(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_tum(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_tum(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_tum(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_tum(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_tum(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_tum(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_tum(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_tum(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_tum(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t 
__riscv_vloxseg2ei16_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_tumu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_tumu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_tumu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_tumu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_tumu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_tumu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_tumu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_tumu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_tumu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_tumu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_tumu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_tumu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_tumu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_tumu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_tumu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_tumu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t 
__riscv_vluxseg4ei16_tumu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_tumu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_tumu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_tumu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_tumu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vluxseg2ei16_tumu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_tumu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_tumu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_tumu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +// masked functions +vbfloat16mf4x2_t __riscv_vloxseg2ei16_mu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vloxseg3ei16_mu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vloxseg4ei16_mu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vloxseg5ei16_mu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vloxseg6ei16_mu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vloxseg7ei16_mu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vloxseg8ei16_mu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vloxseg2ei16_mu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vloxseg3ei16_mu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vloxseg4ei16_mu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vloxseg5ei16_mu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vloxseg6ei16_mu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vloxseg7ei16_mu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vloxseg8ei16_mu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vloxseg2ei16_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vloxseg3ei16_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vloxseg4ei16_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vloxseg5ei16_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vloxseg6ei16_mu(vbool16_t vm, 
vbfloat16m1x6_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vloxseg7ei16_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vloxseg8ei16_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m2x2_t __riscv_vloxseg2ei16_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vloxseg3ei16_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vloxseg4ei16_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vloxseg2ei16_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +vbfloat16mf4x2_t __riscv_vluxseg2ei16_mu(vbool64_t vm, vbfloat16mf4x2_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x3_t __riscv_vluxseg3ei16_mu(vbool64_t vm, vbfloat16mf4x3_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x4_t __riscv_vluxseg4ei16_mu(vbool64_t vm, vbfloat16mf4x4_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x5_t __riscv_vluxseg5ei16_mu(vbool64_t vm, vbfloat16mf4x5_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x6_t __riscv_vluxseg6ei16_mu(vbool64_t vm, vbfloat16mf4x6_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x7_t __riscv_vluxseg7ei16_mu(vbool64_t vm, vbfloat16mf4x7_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf4x8_t __riscv_vluxseg8ei16_mu(vbool64_t vm, vbfloat16mf4x8_t vd, + const __bf16 *rs1, vuint16mf4_t rs2, + size_t vl); +vbfloat16mf2x2_t __riscv_vluxseg2ei16_mu(vbool32_t vm, vbfloat16mf2x2_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x3_t __riscv_vluxseg3ei16_mu(vbool32_t vm, vbfloat16mf2x3_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x4_t __riscv_vluxseg4ei16_mu(vbool32_t vm, vbfloat16mf2x4_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x5_t __riscv_vluxseg5ei16_mu(vbool32_t vm, vbfloat16mf2x5_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x6_t __riscv_vluxseg6ei16_mu(vbool32_t vm, vbfloat16mf2x6_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x7_t __riscv_vluxseg7ei16_mu(vbool32_t vm, vbfloat16mf2x7_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16mf2x8_t __riscv_vluxseg8ei16_mu(vbool32_t vm, vbfloat16mf2x8_t vd, + const __bf16 *rs1, vuint16mf2_t rs2, + size_t vl); +vbfloat16m1x2_t __riscv_vluxseg2ei16_mu(vbool16_t vm, vbfloat16m1x2_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x3_t __riscv_vluxseg3ei16_mu(vbool16_t vm, vbfloat16m1x3_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x4_t __riscv_vluxseg4ei16_mu(vbool16_t vm, vbfloat16m1x4_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x5_t __riscv_vluxseg5ei16_mu(vbool16_t vm, vbfloat16m1x5_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x6_t __riscv_vluxseg6ei16_mu(vbool16_t vm, vbfloat16m1x6_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x7_t __riscv_vluxseg7ei16_mu(vbool16_t vm, vbfloat16m1x7_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); +vbfloat16m1x8_t __riscv_vluxseg8ei16_mu(vbool16_t vm, vbfloat16m1x8_t vd, + const __bf16 *rs1, vuint16m1_t rs2, + size_t vl); 
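// Editorial usage sketch, not part of the generated listing (names such as
// `old_pair`, `base`, and `byte_offsets` are illustrative): with the
// mask-undisturbed (mu) policy, elements whose bit in `vm` is clear keep
// their value from the destination tuple `vd`, while active elements are
// gathered from `rs1` at the byte offsets held in `rs2`. For example:
//
//   vbfloat16m1x2_t pair =
//       __riscv_vluxseg2ei16_mu(vm, old_pair, base, byte_offsets, vl);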
+vbfloat16m2x2_t __riscv_vluxseg2ei16_mu(vbool8_t vm, vbfloat16m2x2_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x3_t __riscv_vluxseg3ei16_mu(vbool8_t vm, vbfloat16m2x3_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m2x4_t __riscv_vluxseg4ei16_mu(vbool8_t vm, vbfloat16m2x4_t vd, + const __bf16 *rs1, vuint16m2_t rs2, + size_t vl); +vbfloat16m4x2_t __riscv_vluxseg2ei16_mu(vbool4_t vm, vbfloat16m4x2_t vd, + const __bf16 *rs1, vuint16m4_t rs2, + size_t vl); +---- + +[[policy-variant-overloadedvector-indexed-segment-store]] +==== Vector Indexed Segment Store Intrinsics +Intrinsics here don't have a policy variant. diff --git a/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/02_bfloat16_convert_intrinsics.adoc b/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/02_bfloat16_convert_intrinsics.adoc new file mode 100644 index 000000000..94b1ff8f3 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/02_bfloat16_convert_intrinsics.adoc @@ -0,0 +1,160 @@ + +=== BFloat16 Convert Intrinsics + +[[policy-variant-overloadedbf16-vector-narrow-convert]] +==== Vector Narrowing Convert Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vfncvtbf16_f_tu(vbfloat16mf4_t vd, vfloat32mf2_t vs2, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_tu(vbfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_tu(vbfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_tu(vbfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_tum(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_tum(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl); +vbfloat16mf4_t __riscv_vfncvtbf16_f_tu(vbfloat16mf4_t vd, vfloat32mf2_t vs2, + unsigned int frm, size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2, + unsigned int frm, size_t vl); +vbfloat16m1_t 
__riscv_vfncvtbf16_f_tu(vbfloat16m1_t vd, vfloat32m2_t vs2, + unsigned int frm, size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_tu(vbfloat16m2_t vd, vfloat32m4_t vs2, + unsigned int frm, size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_tu(vbfloat16m4_t vd, vfloat32m8_t vs2, + unsigned int frm, size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_tum(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_tum(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, unsigned int frm, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, unsigned int frm, + size_t vl); +// masked functions +vbfloat16mf4_t __riscv_vfncvtbf16_f_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, unsigned int frm, + size_t vl); +vbfloat16mf2_t __riscv_vfncvtbf16_f_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, unsigned int frm, + size_t vl); +vbfloat16m1_t __riscv_vfncvtbf16_f_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, unsigned int frm, + size_t vl); +vbfloat16m2_t __riscv_vfncvtbf16_f_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, unsigned int frm, + size_t vl); +vbfloat16m4_t __riscv_vfncvtbf16_f_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, unsigned int frm, + size_t vl); +---- + +[[policy-variant-overloadedbf16-vector-widening-convert]] +==== Vector Widening Convert Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwcvtbf16_f_tu(vfloat32mf2_t vd, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_tu(vfloat32m1_t vd, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_tumu(vbool32_t vm, vfloat32m1_t vd, + 
vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwcvtbf16_f_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwcvtbf16_f_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwcvtbf16_f_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwcvtbf16_f_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwcvtbf16_f_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl); +---- diff --git a/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/03_bfloat16_arithmetic_intrinsics.adoc b/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/03_bfloat16_arithmetic_intrinsics.adoc new file mode 100644 index 000000000..05048e1c7 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/03_bfloat16_arithmetic_intrinsics.adoc @@ -0,0 +1,266 @@ + +=== BFloat16 Arithmetic Intrinsics + +[[policy-variant-overloadedbf16-widening-multiply-accumulate]] +==== Vector Widening Multiply-Accumulate Intrinsics + +[,c] +---- +vfloat32mf2_t __riscv_vfwmaccbf16_tu(vfloat32mf2_t vd, vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tu(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tum(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tum(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tum(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tum(vbool4_t vm, vfloat32m8_t vd, + 
vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tum(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tumu(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tumu(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tumu(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tumu(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_mu(vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_mu(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_mu(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_mu(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_mu(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tu(vfloat32mf2_t vd, vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, unsigned int frm, + size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, unsigned int frm, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tu(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, 
unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tum(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tum(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tum(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tum(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_tumu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_tumu(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_tumu(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_tumu(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_tumu(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +// masked functions +vfloat32mf2_t __riscv_vfwmaccbf16_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, + unsigned int frm, size_t vl); +vfloat32mf2_t __riscv_vfwmaccbf16_mu(vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, unsigned int frm, + size_t vl); +vfloat32m1_t __riscv_vfwmaccbf16_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, + unsigned int frm, size_t vl); +vfloat32m1_t 
__riscv_vfwmaccbf16_mu(vbool32_t vm, vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, unsigned int frm, + size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, vbfloat16m1_t vs2, + unsigned int frm, size_t vl); +vfloat32m2_t __riscv_vfwmaccbf16_mu(vbool16_t vm, vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, unsigned int frm, + size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, vbfloat16m2_t vs2, + unsigned int frm, size_t vl); +vfloat32m4_t __riscv_vfwmaccbf16_mu(vbool8_t vm, vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, unsigned int frm, + size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, vbfloat16m4_t vs2, + unsigned int frm, size_t vl); +vfloat32m8_t __riscv_vfwmaccbf16_mu(vbool4_t vm, vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, unsigned int frm, + size_t vl); +---- + +[[policy-variant-overloadedvector-bf16-move]] +==== Vector BFloat16 Move Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmv_v_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, + size_t vl); +vbfloat16mf2_t __riscv_vmv_v_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, + size_t vl); +vbfloat16m1_t __riscv_vmv_v_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, size_t vl); +vbfloat16m2_t __riscv_vmv_v_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, size_t vl); +vbfloat16m4_t __riscv_vmv_v_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, size_t vl); +vbfloat16m8_t __riscv_vmv_v_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, size_t vl); +---- + +[[policy-variant-overloadedvector-bf16-merge]] +==== Vector BFloat16 Merge Intrinsics + +[,c] +---- +vbfloat16mf4_t __riscv_vmerge_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, vbool64_t v0, size_t vl); +vbfloat16mf2_t __riscv_vmerge_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, vbool32_t v0, size_t vl); +vbfloat16m1_t __riscv_vmerge_tu(vbfloat16m1_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, vbool16_t v0, size_t vl); +vbfloat16m2_t __riscv_vmerge_tu(vbfloat16m2_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, vbool8_t v0, size_t vl); +vbfloat16m4_t __riscv_vmerge_tu(vbfloat16m4_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, vbool4_t v0, size_t vl); +vbfloat16m8_t __riscv_vmerge_tu(vbfloat16m8_t vd, vbfloat16m8_t vs2, + vbfloat16m8_t vs1, vbool2_t v0, size_t vl); +---- diff --git a/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/04_bfloat16_miscellaneous_vector_utility_intrinsics.adoc b/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/04_bfloat16_miscellaneous_vector_utility_intrinsics.adoc new file mode 100644 index 000000000..db730fe08 --- /dev/null +++ b/auto-generated/bfloat16/policy_funcs/overloaded_intrinsic_funcs/04_bfloat16_miscellaneous_vector_utility_intrinsics.adoc @@ -0,0 +1,30 @@ + +=== BFloat16 Miscellaneous Vector Utility Intrinsics + +[[policy-variant-overloadedreinterpret-cast-conversion]] +==== Reinterpret Cast Conversion Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-lmul-extensionn]] +==== Vector LMUL Extension Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-lmul-truncation]] +==== Vector LMUL Truncation Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-initialization]] +==== Vector Initialization Intrinsics +Intrinsics here don't have a policy variant. 
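Editorial note: the utility intrinsics in this file only re-type or regroup whole register groups; they neither take a mask nor compute new element values, so there is nothing for a `tu`/`tum`/`tumu`/`mu` suffix to control. A minimal sketch of the idea, assuming the overloaded bfloat16 reinterpret is generated in the form shown (the variable names are illustrative):
[,c]
----
// Re-typing a bfloat16 register group as its raw 16-bit pattern moves no
// data, so no tail/mask policy applies (assumed overloaded name).
vuint16m1_t bits = __riscv_vreinterpret_u16m1(vec_bf16);
----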
+ +[[policy-variant-overloadedvector-insertion]] +==== Vector Insertion Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-extraction]] +==== Vector Extraction Intrinsics +Intrinsics here don't have a policy variant. + +[[policy-variant-overloadedvector-creation]] +==== Vector Creation Intrinsics +Intrinsics here don't have a policy variant. diff --git a/auto-generated/gnu-api-tests/vcpop.c b/auto-generated/gnu-api-tests/vcpop.c index 7ebc6140c..038d03754 100644 --- a/auto-generated/gnu-api-tests/vcpop.c +++ b/auto-generated/gnu-api-tests/vcpop.c @@ -3,59 +3,59 @@ #include <riscv_vector.h> -unsigned int test_vcpop_m_b1(vbool1_t vs2, size_t vl) { +unsigned long test_vcpop_m_b1(vbool1_t vs2, size_t vl) { return __riscv_vcpop_m_b1(vs2, vl); } -unsigned int test_vcpop_m_b2(vbool2_t vs2, size_t vl) { +unsigned long test_vcpop_m_b2(vbool2_t vs2, size_t vl) { return __riscv_vcpop_m_b2(vs2, vl); } -unsigned int test_vcpop_m_b4(vbool4_t vs2, size_t vl) { +unsigned long test_vcpop_m_b4(vbool4_t vs2, size_t vl) { return __riscv_vcpop_m_b4(vs2, vl); } -unsigned int test_vcpop_m_b8(vbool8_t vs2, size_t vl) { +unsigned long test_vcpop_m_b8(vbool8_t vs2, size_t vl) { return __riscv_vcpop_m_b8(vs2, vl); } -unsigned int test_vcpop_m_b16(vbool16_t vs2, size_t vl) { +unsigned long test_vcpop_m_b16(vbool16_t vs2, size_t vl) { return __riscv_vcpop_m_b16(vs2, vl); } -unsigned int test_vcpop_m_b32(vbool32_t vs2, size_t vl) { +unsigned long test_vcpop_m_b32(vbool32_t vs2, size_t vl) { return __riscv_vcpop_m_b32(vs2, vl); } -unsigned int test_vcpop_m_b64(vbool64_t vs2, size_t vl) { +unsigned long test_vcpop_m_b64(vbool64_t vs2, size_t vl) { return __riscv_vcpop_m_b64(vs2, vl); } -unsigned int test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { +unsigned long test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { return __riscv_vcpop_m_b1_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { +unsigned long test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { return __riscv_vcpop_m_b2_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { +unsigned long test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { return __riscv_vcpop_m_b4_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { +unsigned long test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { return __riscv_vcpop_m_b8_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { +unsigned long test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { return __riscv_vcpop_m_b16_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { +unsigned long test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { return __riscv_vcpop_m_b32_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { +unsigned long test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { return __riscv_vcpop_m_b64_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vcpop\.[ivxfswum.]+\s+} 14 } } */ diff --git a/auto-generated/gnu-api-tests/vfirst.c b/auto-generated/gnu-api-tests/vfirst.c index e2de33c9b..37a4a8f15 100644 --- a/auto-generated/gnu-api-tests/vfirst.c +++ b/auto-generated/gnu-api-tests/vfirst.c @@ -3,59 +3,59 @@ #include <riscv_vector.h> -int test_vfirst_m_b1(vbool1_t vs2, size_t vl) { +long test_vfirst_m_b1(vbool1_t vs2, size_t vl) { return
__riscv_vfirst_m_b1(vs2, vl); } -int test_vfirst_m_b2(vbool2_t vs2, size_t vl) { +long test_vfirst_m_b2(vbool2_t vs2, size_t vl) { return __riscv_vfirst_m_b2(vs2, vl); } -int test_vfirst_m_b4(vbool4_t vs2, size_t vl) { +long test_vfirst_m_b4(vbool4_t vs2, size_t vl) { return __riscv_vfirst_m_b4(vs2, vl); } -int test_vfirst_m_b8(vbool8_t vs2, size_t vl) { +long test_vfirst_m_b8(vbool8_t vs2, size_t vl) { return __riscv_vfirst_m_b8(vs2, vl); } -int test_vfirst_m_b16(vbool16_t vs2, size_t vl) { +long test_vfirst_m_b16(vbool16_t vs2, size_t vl) { return __riscv_vfirst_m_b16(vs2, vl); } -int test_vfirst_m_b32(vbool32_t vs2, size_t vl) { +long test_vfirst_m_b32(vbool32_t vs2, size_t vl) { return __riscv_vfirst_m_b32(vs2, vl); } -int test_vfirst_m_b64(vbool64_t vs2, size_t vl) { +long test_vfirst_m_b64(vbool64_t vs2, size_t vl) { return __riscv_vfirst_m_b64(vs2, vl); } -int test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { +long test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { return __riscv_vfirst_m_b1_m(vm, vs2, vl); } -int test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { +long test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { return __riscv_vfirst_m_b2_m(vm, vs2, vl); } -int test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { +long test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { return __riscv_vfirst_m_b4_m(vm, vs2, vl); } -int test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { +long test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { return __riscv_vfirst_m_b8_m(vm, vs2, vl); } -int test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { +long test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { return __riscv_vfirst_m_b16_m(vm, vs2, vl); } -int test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { +long test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { return __riscv_vfirst_m_b32_m(vm, vs2, vl); } -int test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { +long test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { return __riscv_vfirst_m_b64_m(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfirst\.[ivxfswum.]+\s+} 14 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vcpop.c b/auto-generated/gnu-overloaded-tests/vcpop.c index c0e42e89a..b6bb5a6fb 100644 --- a/auto-generated/gnu-overloaded-tests/vcpop.c +++ b/auto-generated/gnu-overloaded-tests/vcpop.c @@ -3,59 +3,59 @@ #include <riscv_vector.h> -unsigned int test_vcpop_m_b1(vbool1_t vs2, size_t vl) { +unsigned long test_vcpop_m_b1(vbool1_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b2(vbool2_t vs2, size_t vl) { +unsigned long test_vcpop_m_b2(vbool2_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b4(vbool4_t vs2, size_t vl) { +unsigned long test_vcpop_m_b4(vbool4_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b8(vbool8_t vs2, size_t vl) { +unsigned long test_vcpop_m_b8(vbool8_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b16(vbool16_t vs2, size_t vl) { +unsigned long test_vcpop_m_b16(vbool16_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b32(vbool32_t vs2, size_t vl) { +unsigned long test_vcpop_m_b32(vbool32_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b64(vbool64_t vs2, size_t vl) { +unsigned long test_vcpop_m_b64(vbool64_t vs2, size_t vl) { return
__riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { +unsigned long test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { +unsigned long test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { +unsigned long test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { +unsigned long test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { +unsigned long test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { +unsigned long test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { +unsigned long test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vcpop\.[ivxfswum.]+\s+} 14 } } */ diff --git a/auto-generated/gnu-overloaded-tests/vfirst.c b/auto-generated/gnu-overloaded-tests/vfirst.c index ea2a7b730..e215b4fcf 100644 --- a/auto-generated/gnu-overloaded-tests/vfirst.c +++ b/auto-generated/gnu-overloaded-tests/vfirst.c @@ -3,59 +3,59 @@ #include <riscv_vector.h> -int test_vfirst_m_b1(vbool1_t vs2, size_t vl) { +long test_vfirst_m_b1(vbool1_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b2(vbool2_t vs2, size_t vl) { +long test_vfirst_m_b2(vbool2_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b4(vbool4_t vs2, size_t vl) { +long test_vfirst_m_b4(vbool4_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b8(vbool8_t vs2, size_t vl) { +long test_vfirst_m_b8(vbool8_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b16(vbool16_t vs2, size_t vl) { +long test_vfirst_m_b16(vbool16_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b32(vbool32_t vs2, size_t vl) { +long test_vfirst_m_b32(vbool32_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b64(vbool64_t vs2, size_t vl) { +long test_vfirst_m_b64(vbool64_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { +long test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { +long test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { +long test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { +long test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { +long test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int
test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { +long test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { +long test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } /* { dg-final { scan-assembler-times {vseti?vli\s+[a-z0-9]+,\s*[a-z0-9]+,\s*e[0-9]+,\s*mf?[1248],\s*t[au],\s*m[au]\s+vfirst\.[ivxfswum.]+\s+} 14 } } */ diff --git a/auto-generated/intrinsic_funcs.adoc b/auto-generated/intrinsic_funcs.adoc index ef39a9c24..4538c7f44 100644 --- a/auto-generated/intrinsic_funcs.adoc +++ b/auto-generated/intrinsic_funcs.adoc @@ -35506,6 +35506,7 @@ vuint64m8_t __riscv_vmv_v_x_u64m8(uint64_t rs1, size_t vl); [[vector-single-width-saturating-add-and-subtract]] ==== Vector Single-Width Saturating Add and Subtract Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -36904,6 +36905,7 @@ vuint64m8_t __riscv_vasubu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, [[vector-single-width-fractional-multiply-with-rounding-and-saturation]] ==== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -37502,6 +37504,7 @@ vuint64m8_t __riscv_vssrl_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, [[vector-narrowing-fixed-point-clip]] ==== Vector Narrowing Fixed-Point Clip Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -48390,21 +48393,21 @@ vbool64_t __riscv_vmnot_m_b64(vbool64_t vs, size_t vl); [,c] ---- -unsigned int __riscv_vcpop_m_b1(vbool1_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b2(vbool2_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b4(vbool4_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b8(vbool8_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b16(vbool16_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b32(vbool32_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b64(vbool64_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b1(vbool1_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b2(vbool2_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b4(vbool4_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b8(vbool8_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b16(vbool16_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b32(vbool32_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b64(vbool64_t vs2, size_t vl); // masked functions -unsigned int __riscv_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl); +unsigned long
__riscv_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl); ---- [[vfirst-find-first-set-mask-bit]] @@ -48412,21 +48415,21 @@ unsigned int __riscv_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl); [,c] ---- -int __riscv_vfirst_m_b1(vbool1_t vs2, size_t vl); -int __riscv_vfirst_m_b2(vbool2_t vs2, size_t vl); -int __riscv_vfirst_m_b4(vbool4_t vs2, size_t vl); -int __riscv_vfirst_m_b8(vbool8_t vs2, size_t vl); -int __riscv_vfirst_m_b16(vbool16_t vs2, size_t vl); -int __riscv_vfirst_m_b32(vbool32_t vs2, size_t vl); -int __riscv_vfirst_m_b64(vbool64_t vs2, size_t vl); +long __riscv_vfirst_m_b1(vbool1_t vs2, size_t vl); +long __riscv_vfirst_m_b2(vbool2_t vs2, size_t vl); +long __riscv_vfirst_m_b4(vbool4_t vs2, size_t vl); +long __riscv_vfirst_m_b8(vbool8_t vs2, size_t vl); +long __riscv_vfirst_m_b16(vbool16_t vs2, size_t vl); +long __riscv_vfirst_m_b32(vbool32_t vs2, size_t vl); +long __riscv_vfirst_m_b64(vbool64_t vs2, size_t vl); // masked functions -int __riscv_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl); -int __riscv_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl); -int __riscv_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl); -int __riscv_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl); -int __riscv_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl); -int __riscv_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl); -int __riscv_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl); +long __riscv_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl); +long __riscv_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl); +long __riscv_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl); +long __riscv_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl); +long __riscv_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl); +long __riscv_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl); +long __riscv_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl); ---- [[vmsbfm-set-before-first-mask-bit]] diff --git a/auto-generated/intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc b/auto-generated/intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc index 13bf79e2e..4be2c6e5c 100644 --- a/auto-generated/intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc +++ b/auto-generated/intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc @@ -3,6 +3,7 @@ [[vector-single-width-saturating-add-and-subtract]] ==== Vector Single-Width Saturating Add and Subtract Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -1401,6 +1402,7 @@ vuint64m8_t __riscv_vasubu_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, [[vector-single-width-fractional-multiply-with-rounding-and-saturation]] ==== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -1999,6 +2001,7 @@ vuint64m8_t __riscv_vssrl_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, [[vector-narrowing-fixed-point-clip]] ==== Vector Narrowing Fixed-Point Clip Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value.
[,c] ---- diff --git a/auto-generated/intrinsic_funcs/06_vector_mask_intrinsics.adoc b/auto-generated/intrinsic_funcs/06_vector_mask_intrinsics.adoc index a55682d08..12d8f777c 100644 --- a/auto-generated/intrinsic_funcs/06_vector_mask_intrinsics.adoc +++ b/auto-generated/intrinsic_funcs/06_vector_mask_intrinsics.adoc @@ -97,21 +97,21 @@ vbool64_t __riscv_vmnot_m_b64(vbool64_t vs, size_t vl); [,c] ---- -unsigned int __riscv_vcpop_m_b1(vbool1_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b2(vbool2_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b4(vbool4_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b8(vbool8_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b16(vbool16_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b32(vbool32_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b64(vbool64_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b1(vbool1_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b2(vbool2_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b4(vbool4_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b8(vbool8_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b16(vbool16_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b32(vbool32_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b64(vbool64_t vs2, size_t vl); // masked functions -unsigned int __riscv_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl); -unsigned int __riscv_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl); +unsigned long __riscv_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl); ---- [[vfirst-find-first-set-mask-bit]] @@ -119,21 +119,21 @@ unsigned int __riscv_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl); [,c] ---- -int __riscv_vfirst_m_b1(vbool1_t vs2, size_t vl); -int __riscv_vfirst_m_b2(vbool2_t vs2, size_t vl); -int __riscv_vfirst_m_b4(vbool4_t vs2, size_t vl); -int __riscv_vfirst_m_b8(vbool8_t vs2, size_t vl); -int __riscv_vfirst_m_b16(vbool16_t vs2, size_t vl); -int __riscv_vfirst_m_b32(vbool32_t vs2, size_t vl); -int __riscv_vfirst_m_b64(vbool64_t vs2, size_t vl); +long __riscv_vfirst_m_b1(vbool1_t vs2, size_t vl); +long __riscv_vfirst_m_b2(vbool2_t vs2, size_t vl); +long __riscv_vfirst_m_b4(vbool4_t vs2, size_t vl); +long __riscv_vfirst_m_b8(vbool8_t vs2, size_t vl); +long __riscv_vfirst_m_b16(vbool16_t vs2, size_t vl); +long __riscv_vfirst_m_b32(vbool32_t vs2, size_t vl); +long __riscv_vfirst_m_b64(vbool64_t vs2, size_t vl); // masked functions -int __riscv_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl); -int __riscv_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl); -int __riscv_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl); -int __riscv_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl); -int __riscv_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, 
size_t vl); -int __riscv_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl); -int __riscv_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl); +long __riscv_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl); +long __riscv_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl); +long __riscv_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl); +long __riscv_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl); +long __riscv_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl); +long __riscv_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl); +long __riscv_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl); ---- [[vmsbfm-set-before-first-mask-bit]] diff --git a/auto-generated/llvm-api-tests/vcompress.c b/auto-generated/llvm-api-tests/vcompress.c index c1b853636..66ba7f945 100644 --- a/auto-generated/llvm-api-tests/vcompress.c +++ b/auto-generated/llvm-api-tests/vcompress.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vcpop.c b/auto-generated/llvm-api-tests/vcpop.c index 215a2443e..f9eab4847 100644 --- a/auto-generated/llvm-api-tests/vcpop.c +++ b/auto-generated/llvm-api-tests/vcpop.c @@ -1,63 +1,63 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -unsigned int test_vcpop_m_b1(vbool1_t vs2, size_t vl) { +unsigned long test_vcpop_m_b1(vbool1_t vs2, size_t vl) { return __riscv_vcpop_m_b1(vs2, vl); } -unsigned int test_vcpop_m_b2(vbool2_t vs2, size_t vl) { +unsigned long test_vcpop_m_b2(vbool2_t vs2, size_t vl) { return __riscv_vcpop_m_b2(vs2, vl); } -unsigned int test_vcpop_m_b4(vbool4_t vs2, size_t vl) { +unsigned long test_vcpop_m_b4(vbool4_t vs2, size_t vl) { return __riscv_vcpop_m_b4(vs2, vl); } -unsigned int test_vcpop_m_b8(vbool8_t vs2, size_t vl) { +unsigned long test_vcpop_m_b8(vbool8_t vs2, size_t vl) { return __riscv_vcpop_m_b8(vs2, vl); } -unsigned int test_vcpop_m_b16(vbool16_t vs2, size_t vl) { +unsigned long test_vcpop_m_b16(vbool16_t vs2, size_t vl) { return __riscv_vcpop_m_b16(vs2, vl); } -unsigned int test_vcpop_m_b32(vbool32_t vs2, size_t vl) { +unsigned long test_vcpop_m_b32(vbool32_t vs2, size_t vl) { return __riscv_vcpop_m_b32(vs2, vl); } -unsigned int test_vcpop_m_b64(vbool64_t vs2, size_t vl) { +unsigned long test_vcpop_m_b64(vbool64_t vs2, size_t vl) { return __riscv_vcpop_m_b64(vs2, vl); } -unsigned int test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { +unsigned long test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { return __riscv_vcpop_m_b1_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { +unsigned long test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { return __riscv_vcpop_m_b2_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { +unsigned long test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { return __riscv_vcpop_m_b4_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) {
+unsigned long test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { return __riscv_vcpop_m_b8_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { +unsigned long test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { return __riscv_vcpop_m_b16_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { +unsigned long test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { return __riscv_vcpop_m_b32_m(vm, vs2, vl); } -unsigned int test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { +unsigned long test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { return __riscv_vcpop_m_b64_m(vm, vs2, vl); } diff --git a/auto-generated/llvm-api-tests/vcreate.c b/auto-generated/llvm-api-tests/vcreate.c index eb210bb82..b4e93b138 100644 --- a/auto-generated/llvm-api-tests/vcreate.c +++ b/auto-generated/llvm-api-tests/vcreate.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vfabs.c b/auto-generated/llvm-api-tests/vfabs.c index 32c56c5d6..adced534c 100644 --- a/auto-generated/llvm-api-tests/vfabs.c +++ b/auto-generated/llvm-api-tests/vfabs.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vfadd.c b/auto-generated/llvm-api-tests/vfadd.c index 1e7c1f7bb..1b2b16922 100644 --- a/auto-generated/llvm-api-tests/vfadd.c +++ b/auto-generated/llvm-api-tests/vfadd.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vfclass.c b/auto-generated/llvm-api-tests/vfclass.c index b08fe000d..541c9417c 100644 --- a/auto-generated/llvm-api-tests/vfclass.c +++ b/auto-generated/llvm-api-tests/vfclass.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vfcvt.c b/auto-generated/llvm-api-tests/vfcvt.c index bd4ba3848..cd9786880 100644 --- a/auto-generated/llvm-api-tests/vfcvt.c +++ b/auto-generated/llvm-api-tests/vfcvt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff 
--git a/auto-generated/llvm-api-tests/vfcvt_rtz.c b/auto-generated/llvm-api-tests/vfcvt_rtz.c index 161cd61ed..bc24465e3 100644 --- a/auto-generated/llvm-api-tests/vfcvt_rtz.c +++ b/auto-generated/llvm-api-tests/vfcvt_rtz.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vfdiv.c b/auto-generated/llvm-api-tests/vfdiv.c index 22e15199a..6f8e8f0d8 100644 --- a/auto-generated/llvm-api-tests/vfdiv.c +++ b/auto-generated/llvm-api-tests/vfdiv.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vfirst.c b/auto-generated/llvm-api-tests/vfirst.c index e0c651c03..770afdb2d 100644 --- a/auto-generated/llvm-api-tests/vfirst.c +++ b/auto-generated/llvm-api-tests/vfirst.c @@ -5,58 +5,58 @@ #include <riscv_vector.h> -int test_vfirst_m_b1(vbool1_t vs2, size_t vl) { +long test_vfirst_m_b1(vbool1_t vs2, size_t vl) { return __riscv_vfirst_m_b1(vs2, vl); } -int test_vfirst_m_b2(vbool2_t vs2, size_t vl) { +long test_vfirst_m_b2(vbool2_t vs2, size_t vl) { return __riscv_vfirst_m_b2(vs2, vl); } -int test_vfirst_m_b4(vbool4_t vs2, size_t vl) { +long test_vfirst_m_b4(vbool4_t vs2, size_t vl) { return __riscv_vfirst_m_b4(vs2, vl); } -int test_vfirst_m_b8(vbool8_t vs2, size_t vl) { +long test_vfirst_m_b8(vbool8_t vs2, size_t vl) { return __riscv_vfirst_m_b8(vs2, vl); } -int test_vfirst_m_b16(vbool16_t vs2, size_t vl) { +long test_vfirst_m_b16(vbool16_t vs2, size_t vl) { return __riscv_vfirst_m_b16(vs2, vl); } -int test_vfirst_m_b32(vbool32_t vs2, size_t vl) { +long test_vfirst_m_b32(vbool32_t vs2, size_t vl) { return __riscv_vfirst_m_b32(vs2, vl); } -int test_vfirst_m_b64(vbool64_t vs2, size_t vl) { +long test_vfirst_m_b64(vbool64_t vs2, size_t vl) { return __riscv_vfirst_m_b64(vs2, vl); } -int test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { +long test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { return __riscv_vfirst_m_b1_m(vm, vs2, vl); } -int test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { +long test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { return __riscv_vfirst_m_b2_m(vm, vs2, vl); } -int test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { +long test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { return __riscv_vfirst_m_b4_m(vm, vs2, vl); } -int test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { +long test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { return __riscv_vfirst_m_b8_m(vm, vs2, vl); } -int test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { +long test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { return __riscv_vfirst_m_b16_m(vm, vs2, vl); } -int test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { +long test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { return __riscv_vfirst_m_b32_m(vm, vs2, vl); } -int test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t
vl) { return __riscv_vfirst_m_b64_m(vm, vs2, vl); } diff --git a/auto-generated/llvm-api-tests/vfmacc.c b/auto-generated/llvm-api-tests/vfmacc.c index d62f00134..a20dbb446 100644 --- a/auto-generated/llvm-api-tests/vfmacc.c +++ b/auto-generated/llvm-api-tests/vfmacc.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vfmadd.c b/auto-generated/llvm-api-tests/vfmadd.c index a15b7b458..171fdd960 100644 --- a/auto-generated/llvm-api-tests/vfmadd.c +++ b/auto-generated/llvm-api-tests/vfmadd.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vfmax.c b/auto-generated/llvm-api-tests/vfmax.c index 4ab927dce..a8417714a 100644 --- a/auto-generated/llvm-api-tests/vfmax.c +++ b/auto-generated/llvm-api-tests/vfmax.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vfmerge.c b/auto-generated/llvm-api-tests/vfmerge.c index b5c61ce53..84e03c8d9 100644 --- a/auto-generated/llvm-api-tests/vfmerge.c +++ b/auto-generated/llvm-api-tests/vfmerge.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vfmin.c b/auto-generated/llvm-api-tests/vfmin.c index 99824aa6d..b71a75355 100644 --- a/auto-generated/llvm-api-tests/vfmin.c +++ b/auto-generated/llvm-api-tests/vfmin.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vfmsac.c b/auto-generated/llvm-api-tests/vfmsac.c index c98410bfd..8a8184bb7 100644 --- a/auto-generated/llvm-api-tests/vfmsac.c +++ b/auto-generated/llvm-api-tests/vfmsac.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vfmsub.c b/auto-generated/llvm-api-tests/vfmsub.c 
index fbf8eb409..abdd2b4ba 100644
--- a/auto-generated/llvm-api-tests/vfmsub.c
+++ b/auto-generated/llvm-api-tests/vfmsub.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfmul.c b/auto-generated/llvm-api-tests/vfmul.c
index 7a8f62b2f..9ced5cb86 100644
--- a/auto-generated/llvm-api-tests/vfmul.c
+++ b/auto-generated/llvm-api-tests/vfmul.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfmv.c b/auto-generated/llvm-api-tests/vfmv.c
index 00bbac070..eaebfe803 100644
--- a/auto-generated/llvm-api-tests/vfmv.c
+++ b/auto-generated/llvm-api-tests/vfmv.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfncvt.c b/auto-generated/llvm-api-tests/vfncvt.c
index 13ddea146..34cf539ba 100644
--- a/auto-generated/llvm-api-tests/vfncvt.c
+++ b/auto-generated/llvm-api-tests/vfncvt.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfncvt_rod.c b/auto-generated/llvm-api-tests/vfncvt_rod.c
index c9d4d43a2..37d976494 100644
--- a/auto-generated/llvm-api-tests/vfncvt_rod.c
+++ b/auto-generated/llvm-api-tests/vfncvt_rod.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfncvt_rtz.c b/auto-generated/llvm-api-tests/vfncvt_rtz.c
index e16577975..4365aa631 100644
--- a/auto-generated/llvm-api-tests/vfncvt_rtz.c
+++ b/auto-generated/llvm-api-tests/vfncvt_rtz.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfneg.c b/auto-generated/llvm-api-tests/vfneg.c
index 675185ffb..00f2738b5 100644
--- a/auto-generated/llvm-api-tests/vfneg.c
+++ b/auto-generated/llvm-api-tests/vfneg.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfnmacc.c b/auto-generated/llvm-api-tests/vfnmacc.c
index 362ffa61f..1604e59e9 100644
--- a/auto-generated/llvm-api-tests/vfnmacc.c
+++ b/auto-generated/llvm-api-tests/vfnmacc.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfnmadd.c b/auto-generated/llvm-api-tests/vfnmadd.c
index b3a6e6968..f3c611920 100644
--- a/auto-generated/llvm-api-tests/vfnmadd.c
+++ b/auto-generated/llvm-api-tests/vfnmadd.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfnmsac.c b/auto-generated/llvm-api-tests/vfnmsac.c
index ab69d02d2..ef15e33e6 100644
--- a/auto-generated/llvm-api-tests/vfnmsac.c
+++ b/auto-generated/llvm-api-tests/vfnmsac.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfnmsub.c b/auto-generated/llvm-api-tests/vfnmsub.c
index 3db7b157b..06d555eea 100644
--- a/auto-generated/llvm-api-tests/vfnmsub.c
+++ b/auto-generated/llvm-api-tests/vfnmsub.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfrdiv.c b/auto-generated/llvm-api-tests/vfrdiv.c
index 148f0aab3..83d568a8c 100644
--- a/auto-generated/llvm-api-tests/vfrdiv.c
+++ b/auto-generated/llvm-api-tests/vfrdiv.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfrec7.c b/auto-generated/llvm-api-tests/vfrec7.c
index 1eee5ca98..63457e869 100644
--- a/auto-generated/llvm-api-tests/vfrec7.c
+++ b/auto-generated/llvm-api-tests/vfrec7.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfredmax.c b/auto-generated/llvm-api-tests/vfredmax.c
index 6c589e7f8..99a66b6a6 100644
--- a/auto-generated/llvm-api-tests/vfredmax.c
+++ b/auto-generated/llvm-api-tests/vfredmax.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfredmin.c b/auto-generated/llvm-api-tests/vfredmin.c
index 452e21300..2b86c411b 100644
--- a/auto-generated/llvm-api-tests/vfredmin.c
+++ b/auto-generated/llvm-api-tests/vfredmin.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfredosum.c b/auto-generated/llvm-api-tests/vfredosum.c
index 4c814c631..629b5a700 100644
--- a/auto-generated/llvm-api-tests/vfredosum.c
+++ b/auto-generated/llvm-api-tests/vfredosum.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfredusum.c b/auto-generated/llvm-api-tests/vfredusum.c
index c9a14d21c..8e508fa30 100644
--- a/auto-generated/llvm-api-tests/vfredusum.c
+++ b/auto-generated/llvm-api-tests/vfredusum.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfrsqrt7.c b/auto-generated/llvm-api-tests/vfrsqrt7.c
index a8f842630..97ff9d763 100644
--- a/auto-generated/llvm-api-tests/vfrsqrt7.c
+++ b/auto-generated/llvm-api-tests/vfrsqrt7.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfrsub.c b/auto-generated/llvm-api-tests/vfrsub.c
index 45b5b9be0..130152a86 100644
--- a/auto-generated/llvm-api-tests/vfrsub.c
+++ b/auto-generated/llvm-api-tests/vfrsub.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfsgnj.c b/auto-generated/llvm-api-tests/vfsgnj.c
index bf86ab1f7..c7c14646d 100644
--- a/auto-generated/llvm-api-tests/vfsgnj.c
+++ b/auto-generated/llvm-api-tests/vfsgnj.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfsgnjn.c b/auto-generated/llvm-api-tests/vfsgnjn.c
index ea4687731..7d452ebfa 100644
--- a/auto-generated/llvm-api-tests/vfsgnjn.c
+++ b/auto-generated/llvm-api-tests/vfsgnjn.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfsgnjx.c b/auto-generated/llvm-api-tests/vfsgnjx.c
index d46b43734..1e116f4c0 100644
--- a/auto-generated/llvm-api-tests/vfsgnjx.c
+++ b/auto-generated/llvm-api-tests/vfsgnjx.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfslide1down.c b/auto-generated/llvm-api-tests/vfslide1down.c
index 114953471..fbb42b5f4 100644
--- a/auto-generated/llvm-api-tests/vfslide1down.c
+++ b/auto-generated/llvm-api-tests/vfslide1down.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfslide1up.c b/auto-generated/llvm-api-tests/vfslide1up.c
index e2cc32def..7a38df8e4 100644
--- a/auto-generated/llvm-api-tests/vfslide1up.c
+++ b/auto-generated/llvm-api-tests/vfslide1up.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfsqrt.c b/auto-generated/llvm-api-tests/vfsqrt.c
index 78ed79818..cbe3068c2 100644
--- a/auto-generated/llvm-api-tests/vfsqrt.c
+++ b/auto-generated/llvm-api-tests/vfsqrt.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfsub.c b/auto-generated/llvm-api-tests/vfsub.c
index fa17a9e17..6bd3ae2b1 100644
--- a/auto-generated/llvm-api-tests/vfsub.c
+++ b/auto-generated/llvm-api-tests/vfsub.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfwadd.c b/auto-generated/llvm-api-tests/vfwadd.c
index 41c9259bd..d12d482bb 100644
--- a/auto-generated/llvm-api-tests/vfwadd.c
+++ b/auto-generated/llvm-api-tests/vfwadd.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfwcvt.c b/auto-generated/llvm-api-tests/vfwcvt.c
index e890f795c..eab0aec1c 100644
--- a/auto-generated/llvm-api-tests/vfwcvt.c
+++ b/auto-generated/llvm-api-tests/vfwcvt.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfwcvt_rtz.c b/auto-generated/llvm-api-tests/vfwcvt_rtz.c
index 0989e272e..c906df136 100644
--- a/auto-generated/llvm-api-tests/vfwcvt_rtz.c
+++ b/auto-generated/llvm-api-tests/vfwcvt_rtz.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfwmacc.c b/auto-generated/llvm-api-tests/vfwmacc.c
index 6c033ed24..452a5d5ab 100644
--- a/auto-generated/llvm-api-tests/vfwmacc.c
+++ b/auto-generated/llvm-api-tests/vfwmacc.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfwmsac.c b/auto-generated/llvm-api-tests/vfwmsac.c
index 13dac6399..9e56700a0 100644
--- a/auto-generated/llvm-api-tests/vfwmsac.c
+++ b/auto-generated/llvm-api-tests/vfwmsac.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfwmul.c b/auto-generated/llvm-api-tests/vfwmul.c
index e9356f26e..411561c07 100644
--- a/auto-generated/llvm-api-tests/vfwmul.c
+++ b/auto-generated/llvm-api-tests/vfwmul.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfwnmacc.c b/auto-generated/llvm-api-tests/vfwnmacc.c
index 1dc428733..36bcae2a8 100644
--- a/auto-generated/llvm-api-tests/vfwnmacc.c
+++ b/auto-generated/llvm-api-tests/vfwnmacc.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfwnmsac.c b/auto-generated/llvm-api-tests/vfwnmsac.c
index 558bc3f02..565516d0e 100644
--- a/auto-generated/llvm-api-tests/vfwnmsac.c
+++ b/auto-generated/llvm-api-tests/vfwnmsac.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfwredosum.c b/auto-generated/llvm-api-tests/vfwredosum.c
index e0435cb6f..15558ff50 100644
--- a/auto-generated/llvm-api-tests/vfwredosum.c
+++ b/auto-generated/llvm-api-tests/vfwredosum.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfwredusum.c b/auto-generated/llvm-api-tests/vfwredusum.c
index 805f803e7..030a39d1b 100644
--- a/auto-generated/llvm-api-tests/vfwredusum.c
+++ b/auto-generated/llvm-api-tests/vfwredusum.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vfwsub.c b/auto-generated/llvm-api-tests/vfwsub.c
index 4ea5afc1e..25d5cd882 100644
--- a/auto-generated/llvm-api-tests/vfwsub.c
+++ b/auto-generated/llvm-api-tests/vfwsub.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vget.c b/auto-generated/llvm-api-tests/vget.c
index 0a1ef78b9..4cdc31aee 100644
--- a/auto-generated/llvm-api-tests/vget.c
+++ b/auto-generated/llvm-api-tests/vget.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vle16.c b/auto-generated/llvm-api-tests/vle16.c
index 1edb8b2e5..ca80781cd 100644
--- a/auto-generated/llvm-api-tests/vle16.c
+++ b/auto-generated/llvm-api-tests/vle16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vle16ff.c b/auto-generated/llvm-api-tests/vle16ff.c
index 3ada7b0b7..7fba41965 100644
--- a/auto-generated/llvm-api-tests/vle16ff.c
+++ b/auto-generated/llvm-api-tests/vle16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vle32.c b/auto-generated/llvm-api-tests/vle32.c
index 23abaac35..96b4f1adb 100644
--- a/auto-generated/llvm-api-tests/vle32.c
+++ b/auto-generated/llvm-api-tests/vle32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vle32ff.c b/auto-generated/llvm-api-tests/vle32ff.c
index 13d16aba7..168d3b626 100644
--- a/auto-generated/llvm-api-tests/vle32ff.c
+++ b/auto-generated/llvm-api-tests/vle32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vle64.c b/auto-generated/llvm-api-tests/vle64.c
index 89b222c0f..129e9136b 100644
--- a/auto-generated/llvm-api-tests/vle64.c
+++ b/auto-generated/llvm-api-tests/vle64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vle64ff.c b/auto-generated/llvm-api-tests/vle64ff.c
index e0e57702c..d675688d1 100644
--- a/auto-generated/llvm-api-tests/vle64ff.c
+++ b/auto-generated/llvm-api-tests/vle64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vle8.c b/auto-generated/llvm-api-tests/vle8.c
index cf7ad5a70..f3b7ebdcb 100644
--- a/auto-generated/llvm-api-tests/vle8.c
+++ b/auto-generated/llvm-api-tests/vle8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vle8ff.c b/auto-generated/llvm-api-tests/vle8ff.c
index 89e9fa727..40bcb637b 100644
--- a/auto-generated/llvm-api-tests/vle8ff.c
+++ b/auto-generated/llvm-api-tests/vle8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlmul_ext_v.c b/auto-generated/llvm-api-tests/vlmul_ext_v.c
index 4ba4b622d..f5efa7d37 100644
--- a/auto-generated/llvm-api-tests/vlmul_ext_v.c
+++ b/auto-generated/llvm-api-tests/vlmul_ext_v.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlmul_trunc_v.c b/auto-generated/llvm-api-tests/vlmul_trunc_v.c
index e818f02b8..5593e0c02 100644
--- a/auto-generated/llvm-api-tests/vlmul_trunc_v.c
+++ b/auto-generated/llvm-api-tests/vlmul_trunc_v.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxei16.c b/auto-generated/llvm-api-tests/vloxei16.c
index 4a3d841ac..6aac7d028 100644
--- a/auto-generated/llvm-api-tests/vloxei16.c
+++ b/auto-generated/llvm-api-tests/vloxei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxei32.c b/auto-generated/llvm-api-tests/vloxei32.c
index 4e11ae983..ddb683a7e 100644
--- a/auto-generated/llvm-api-tests/vloxei32.c
+++ b/auto-generated/llvm-api-tests/vloxei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxei64.c b/auto-generated/llvm-api-tests/vloxei64.c
index 83c8b97ea..92fc3f303 100644
--- a/auto-generated/llvm-api-tests/vloxei64.c
+++ b/auto-generated/llvm-api-tests/vloxei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxei8.c b/auto-generated/llvm-api-tests/vloxei8.c
index 853c7222c..13f1fb3c2 100644
--- a/auto-generated/llvm-api-tests/vloxei8.c
+++ b/auto-generated/llvm-api-tests/vloxei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg2ei16.c b/auto-generated/llvm-api-tests/vloxseg2ei16.c
index 02df39e7d..71129dee7 100644
--- a/auto-generated/llvm-api-tests/vloxseg2ei16.c
+++ b/auto-generated/llvm-api-tests/vloxseg2ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg2ei32.c b/auto-generated/llvm-api-tests/vloxseg2ei32.c
index bcaf9bda8..58e353881 100644
--- a/auto-generated/llvm-api-tests/vloxseg2ei32.c
+++ b/auto-generated/llvm-api-tests/vloxseg2ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg2ei64.c b/auto-generated/llvm-api-tests/vloxseg2ei64.c
index 15c0eb168..03cd5d99b 100644
--- a/auto-generated/llvm-api-tests/vloxseg2ei64.c
+++ b/auto-generated/llvm-api-tests/vloxseg2ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg2ei8.c b/auto-generated/llvm-api-tests/vloxseg2ei8.c
index daa5391f1..bf9b24527 100644
--- a/auto-generated/llvm-api-tests/vloxseg2ei8.c
+++ b/auto-generated/llvm-api-tests/vloxseg2ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg3ei16.c b/auto-generated/llvm-api-tests/vloxseg3ei16.c
index eff43cb00..eec681cbf 100644
--- a/auto-generated/llvm-api-tests/vloxseg3ei16.c
+++ b/auto-generated/llvm-api-tests/vloxseg3ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg3ei32.c b/auto-generated/llvm-api-tests/vloxseg3ei32.c
index 9c22904f6..a70173a1d 100644
--- a/auto-generated/llvm-api-tests/vloxseg3ei32.c
+++ b/auto-generated/llvm-api-tests/vloxseg3ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg3ei64.c b/auto-generated/llvm-api-tests/vloxseg3ei64.c
index 3ed198662..d036adb1c 100644
--- a/auto-generated/llvm-api-tests/vloxseg3ei64.c
+++ b/auto-generated/llvm-api-tests/vloxseg3ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg3ei8.c b/auto-generated/llvm-api-tests/vloxseg3ei8.c
index 8f2aa2c20..9eed40296 100644
--- a/auto-generated/llvm-api-tests/vloxseg3ei8.c
+++ b/auto-generated/llvm-api-tests/vloxseg3ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg4ei16.c b/auto-generated/llvm-api-tests/vloxseg4ei16.c
index d7265f89a..7b1c86cd3 100644
--- a/auto-generated/llvm-api-tests/vloxseg4ei16.c
+++ b/auto-generated/llvm-api-tests/vloxseg4ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg4ei32.c b/auto-generated/llvm-api-tests/vloxseg4ei32.c
index 0486fb77d..24ffa725b 100644
--- a/auto-generated/llvm-api-tests/vloxseg4ei32.c
+++ b/auto-generated/llvm-api-tests/vloxseg4ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg4ei64.c b/auto-generated/llvm-api-tests/vloxseg4ei64.c
index de6f1a886..7f49b027e 100644
--- a/auto-generated/llvm-api-tests/vloxseg4ei64.c
+++ b/auto-generated/llvm-api-tests/vloxseg4ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg4ei8.c b/auto-generated/llvm-api-tests/vloxseg4ei8.c
index d0defec82..6b04e997b 100644
--- a/auto-generated/llvm-api-tests/vloxseg4ei8.c
+++ b/auto-generated/llvm-api-tests/vloxseg4ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg5ei16.c b/auto-generated/llvm-api-tests/vloxseg5ei16.c
index 0d2b44bea..1a1a1b2ed 100644
--- a/auto-generated/llvm-api-tests/vloxseg5ei16.c
+++ b/auto-generated/llvm-api-tests/vloxseg5ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg5ei32.c b/auto-generated/llvm-api-tests/vloxseg5ei32.c
index a80622c93..836f47939 100644
--- a/auto-generated/llvm-api-tests/vloxseg5ei32.c
+++ b/auto-generated/llvm-api-tests/vloxseg5ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg5ei64.c b/auto-generated/llvm-api-tests/vloxseg5ei64.c
index 9b13b99fc..ba914fd2f 100644
--- a/auto-generated/llvm-api-tests/vloxseg5ei64.c
+++ b/auto-generated/llvm-api-tests/vloxseg5ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg5ei8.c b/auto-generated/llvm-api-tests/vloxseg5ei8.c
index db6007934..6bce373aa 100644
--- a/auto-generated/llvm-api-tests/vloxseg5ei8.c
+++ b/auto-generated/llvm-api-tests/vloxseg5ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg6ei16.c b/auto-generated/llvm-api-tests/vloxseg6ei16.c
index 4415a7ab7..c27226625 100644
--- a/auto-generated/llvm-api-tests/vloxseg6ei16.c
+++ b/auto-generated/llvm-api-tests/vloxseg6ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg6ei32.c b/auto-generated/llvm-api-tests/vloxseg6ei32.c
index 637fd8d87..61413cd94 100644
--- a/auto-generated/llvm-api-tests/vloxseg6ei32.c
+++ b/auto-generated/llvm-api-tests/vloxseg6ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg6ei64.c b/auto-generated/llvm-api-tests/vloxseg6ei64.c
index 8d9b8736e..ff68337f5 100644
--- a/auto-generated/llvm-api-tests/vloxseg6ei64.c
+++ b/auto-generated/llvm-api-tests/vloxseg6ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg6ei8.c b/auto-generated/llvm-api-tests/vloxseg6ei8.c
index 5d4eb2a2a..1592801ff 100644
--- a/auto-generated/llvm-api-tests/vloxseg6ei8.c
+++ b/auto-generated/llvm-api-tests/vloxseg6ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg7ei16.c b/auto-generated/llvm-api-tests/vloxseg7ei16.c
index 6080aa71f..c2f3fac8c 100644
--- a/auto-generated/llvm-api-tests/vloxseg7ei16.c
+++ b/auto-generated/llvm-api-tests/vloxseg7ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg7ei32.c b/auto-generated/llvm-api-tests/vloxseg7ei32.c
index 2d5dcbceb..7d8bc264b 100644
--- a/auto-generated/llvm-api-tests/vloxseg7ei32.c
+++ b/auto-generated/llvm-api-tests/vloxseg7ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg7ei64.c b/auto-generated/llvm-api-tests/vloxseg7ei64.c
index 02241410c..398e51c31 100644
--- a/auto-generated/llvm-api-tests/vloxseg7ei64.c
+++ b/auto-generated/llvm-api-tests/vloxseg7ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg7ei8.c b/auto-generated/llvm-api-tests/vloxseg7ei8.c
index 76401de47..206b328f2 100644
--- a/auto-generated/llvm-api-tests/vloxseg7ei8.c
+++ b/auto-generated/llvm-api-tests/vloxseg7ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg8ei16.c b/auto-generated/llvm-api-tests/vloxseg8ei16.c
index 938a60c4b..3966b114a 100644
--- a/auto-generated/llvm-api-tests/vloxseg8ei16.c
+++ b/auto-generated/llvm-api-tests/vloxseg8ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg8ei32.c b/auto-generated/llvm-api-tests/vloxseg8ei32.c
index ce285812a..128142a26 100644
--- a/auto-generated/llvm-api-tests/vloxseg8ei32.c
+++ b/auto-generated/llvm-api-tests/vloxseg8ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg8ei64.c b/auto-generated/llvm-api-tests/vloxseg8ei64.c
index b1edf81ed..1403d63e3 100644
--- a/auto-generated/llvm-api-tests/vloxseg8ei64.c
+++ b/auto-generated/llvm-api-tests/vloxseg8ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vloxseg8ei8.c b/auto-generated/llvm-api-tests/vloxseg8ei8.c
index fe0929cc4..e5dd5e467 100644
--- a/auto-generated/llvm-api-tests/vloxseg8ei8.c
+++ b/auto-generated/llvm-api-tests/vloxseg8ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlse16.c b/auto-generated/llvm-api-tests/vlse16.c
index 90e15466e..eab608682 100644
--- a/auto-generated/llvm-api-tests/vlse16.c
+++ b/auto-generated/llvm-api-tests/vlse16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlse32.c b/auto-generated/llvm-api-tests/vlse32.c
index 251f358b1..1cfb98b97 100644
--- a/auto-generated/llvm-api-tests/vlse32.c
+++ b/auto-generated/llvm-api-tests/vlse32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlse64.c b/auto-generated/llvm-api-tests/vlse64.c
index 68d9b5b2a..061ade920 100644
--- a/auto-generated/llvm-api-tests/vlse64.c
+++ b/auto-generated/llvm-api-tests/vlse64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg2e16.c b/auto-generated/llvm-api-tests/vlseg2e16.c
index bc8b9bad9..71f25cf95 100644
--- a/auto-generated/llvm-api-tests/vlseg2e16.c
+++ b/auto-generated/llvm-api-tests/vlseg2e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg2e16ff.c b/auto-generated/llvm-api-tests/vlseg2e16ff.c
index b97992fd8..99268dc34 100644
--- a/auto-generated/llvm-api-tests/vlseg2e16ff.c
+++ b/auto-generated/llvm-api-tests/vlseg2e16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg2e32.c b/auto-generated/llvm-api-tests/vlseg2e32.c
index ef79ea766..ae76f5585 100644
--- a/auto-generated/llvm-api-tests/vlseg2e32.c
+++ b/auto-generated/llvm-api-tests/vlseg2e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg2e32ff.c b/auto-generated/llvm-api-tests/vlseg2e32ff.c
index 8facb295d..bc3c05f5b 100644
--- a/auto-generated/llvm-api-tests/vlseg2e32ff.c
+++ b/auto-generated/llvm-api-tests/vlseg2e32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg2e64.c b/auto-generated/llvm-api-tests/vlseg2e64.c
index 48ceb013d..34c0d58b8 100644
--- a/auto-generated/llvm-api-tests/vlseg2e64.c
+++ b/auto-generated/llvm-api-tests/vlseg2e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg2e64ff.c b/auto-generated/llvm-api-tests/vlseg2e64ff.c
index 361262628..7699a25e6 100644
--- a/auto-generated/llvm-api-tests/vlseg2e64ff.c
+++ b/auto-generated/llvm-api-tests/vlseg2e64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg2e8ff.c b/auto-generated/llvm-api-tests/vlseg2e8ff.c
index 3a4cd942e..72b14cbbd 100644
--- a/auto-generated/llvm-api-tests/vlseg2e8ff.c
+++ b/auto-generated/llvm-api-tests/vlseg2e8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg3e16.c b/auto-generated/llvm-api-tests/vlseg3e16.c
index 711d3a29a..98e3744bf 100644
--- a/auto-generated/llvm-api-tests/vlseg3e16.c
+++ b/auto-generated/llvm-api-tests/vlseg3e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg3e16ff.c b/auto-generated/llvm-api-tests/vlseg3e16ff.c
index 231804341..0c496428c 100644
--- a/auto-generated/llvm-api-tests/vlseg3e16ff.c
+++ b/auto-generated/llvm-api-tests/vlseg3e16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg3e32.c b/auto-generated/llvm-api-tests/vlseg3e32.c
index 51f9a55e4..84e99d77a 100644
--- a/auto-generated/llvm-api-tests/vlseg3e32.c
+++ b/auto-generated/llvm-api-tests/vlseg3e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg3e32ff.c b/auto-generated/llvm-api-tests/vlseg3e32ff.c
index 5b93ced09..5e8c66e0f 100644
--- a/auto-generated/llvm-api-tests/vlseg3e32ff.c
+++ b/auto-generated/llvm-api-tests/vlseg3e32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg3e64.c b/auto-generated/llvm-api-tests/vlseg3e64.c
index c156869fe..397d61da9 100644
--- a/auto-generated/llvm-api-tests/vlseg3e64.c
+++ b/auto-generated/llvm-api-tests/vlseg3e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg3e64ff.c b/auto-generated/llvm-api-tests/vlseg3e64ff.c
index f1f2a19c9..9d820e598 100644
--- a/auto-generated/llvm-api-tests/vlseg3e64ff.c
+++ b/auto-generated/llvm-api-tests/vlseg3e64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg3e8ff.c b/auto-generated/llvm-api-tests/vlseg3e8ff.c
index 53131921d..7676dbddc 100644
--- a/auto-generated/llvm-api-tests/vlseg3e8ff.c
+++ b/auto-generated/llvm-api-tests/vlseg3e8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg4e16.c b/auto-generated/llvm-api-tests/vlseg4e16.c
index 32d655d0f..9c3c85978 100644
--- a/auto-generated/llvm-api-tests/vlseg4e16.c
+++ b/auto-generated/llvm-api-tests/vlseg4e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg4e16ff.c b/auto-generated/llvm-api-tests/vlseg4e16ff.c
index e2a731656..a3424dac0 100644
--- a/auto-generated/llvm-api-tests/vlseg4e16ff.c
+++ b/auto-generated/llvm-api-tests/vlseg4e16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg4e32.c b/auto-generated/llvm-api-tests/vlseg4e32.c
index fe3f00ce6..cc48e8770 100644
--- a/auto-generated/llvm-api-tests/vlseg4e32.c
+++ b/auto-generated/llvm-api-tests/vlseg4e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg4e32ff.c b/auto-generated/llvm-api-tests/vlseg4e32ff.c
index 9f444d9e5..7e1712f5b 100644
--- a/auto-generated/llvm-api-tests/vlseg4e32ff.c
+++ b/auto-generated/llvm-api-tests/vlseg4e32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg4e64.c b/auto-generated/llvm-api-tests/vlseg4e64.c
index 2c722cea4..068480511 100644
--- a/auto-generated/llvm-api-tests/vlseg4e64.c
+++ b/auto-generated/llvm-api-tests/vlseg4e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg4e64ff.c b/auto-generated/llvm-api-tests/vlseg4e64ff.c
index 742d58c7f..1e6b01a8d 100644
--- a/auto-generated/llvm-api-tests/vlseg4e64ff.c
+++ b/auto-generated/llvm-api-tests/vlseg4e64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg4e8ff.c b/auto-generated/llvm-api-tests/vlseg4e8ff.c
index 0bbc04d02..cca3754b1 100644
--- a/auto-generated/llvm-api-tests/vlseg4e8ff.c
+++ b/auto-generated/llvm-api-tests/vlseg4e8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg5e16.c b/auto-generated/llvm-api-tests/vlseg5e16.c
index ea0ed124b..e92cfbabe 100644
--- a/auto-generated/llvm-api-tests/vlseg5e16.c
+++ b/auto-generated/llvm-api-tests/vlseg5e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg5e16ff.c b/auto-generated/llvm-api-tests/vlseg5e16ff.c
index c3b8d8eba..f5dc0b4b2 100644
--- a/auto-generated/llvm-api-tests/vlseg5e16ff.c
+++ b/auto-generated/llvm-api-tests/vlseg5e16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg5e32.c b/auto-generated/llvm-api-tests/vlseg5e32.c
index 21da0e4b7..66811f1fd 100644
--- a/auto-generated/llvm-api-tests/vlseg5e32.c
+++ b/auto-generated/llvm-api-tests/vlseg5e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg5e32ff.c b/auto-generated/llvm-api-tests/vlseg5e32ff.c
index 21a95ce70..bcb66c87e 100644
--- a/auto-generated/llvm-api-tests/vlseg5e32ff.c
+++ b/auto-generated/llvm-api-tests/vlseg5e32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg5e64.c b/auto-generated/llvm-api-tests/vlseg5e64.c
index f6647642d..d10cbeb5d 100644
--- a/auto-generated/llvm-api-tests/vlseg5e64.c
+++ b/auto-generated/llvm-api-tests/vlseg5e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg5e64ff.c b/auto-generated/llvm-api-tests/vlseg5e64ff.c
index 8fadaf499..b0069d3b7 100644
--- a/auto-generated/llvm-api-tests/vlseg5e64ff.c
+++ b/auto-generated/llvm-api-tests/vlseg5e64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
 
diff --git a/auto-generated/llvm-api-tests/vlseg5e8ff.c
b/auto-generated/llvm-api-tests/vlseg5e8ff.c index 558d621b0..0a0d83444 100644 --- a/auto-generated/llvm-api-tests/vlseg5e8ff.c +++ b/auto-generated/llvm-api-tests/vlseg5e8ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg6e16.c b/auto-generated/llvm-api-tests/vlseg6e16.c index b7ce52ee2..6bb7496a6 100644 --- a/auto-generated/llvm-api-tests/vlseg6e16.c +++ b/auto-generated/llvm-api-tests/vlseg6e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg6e16ff.c b/auto-generated/llvm-api-tests/vlseg6e16ff.c index a63f555d7..639729e02 100644 --- a/auto-generated/llvm-api-tests/vlseg6e16ff.c +++ b/auto-generated/llvm-api-tests/vlseg6e16ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg6e32.c b/auto-generated/llvm-api-tests/vlseg6e32.c index acb7c1d44..92463b7fe 100644 --- a/auto-generated/llvm-api-tests/vlseg6e32.c +++ b/auto-generated/llvm-api-tests/vlseg6e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg6e32ff.c b/auto-generated/llvm-api-tests/vlseg6e32ff.c index 363e14621..034614b1f 100644 --- a/auto-generated/llvm-api-tests/vlseg6e32ff.c +++ b/auto-generated/llvm-api-tests/vlseg6e32ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg6e64.c b/auto-generated/llvm-api-tests/vlseg6e64.c index 31453fcd8..dc86357ce 100644 --- a/auto-generated/llvm-api-tests/vlseg6e64.c +++ b/auto-generated/llvm-api-tests/vlseg6e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg6e64ff.c b/auto-generated/llvm-api-tests/vlseg6e64ff.c 
index e4ea835c2..6cce1d8d5 100644 --- a/auto-generated/llvm-api-tests/vlseg6e64ff.c +++ b/auto-generated/llvm-api-tests/vlseg6e64ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg6e8ff.c b/auto-generated/llvm-api-tests/vlseg6e8ff.c index cd0310064..30df3e7ab 100644 --- a/auto-generated/llvm-api-tests/vlseg6e8ff.c +++ b/auto-generated/llvm-api-tests/vlseg6e8ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg7e16.c b/auto-generated/llvm-api-tests/vlseg7e16.c index 77ff3e6d1..843b4b90f 100644 --- a/auto-generated/llvm-api-tests/vlseg7e16.c +++ b/auto-generated/llvm-api-tests/vlseg7e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg7e16ff.c b/auto-generated/llvm-api-tests/vlseg7e16ff.c index a396ff1cf..36c0c3fde 100644 --- a/auto-generated/llvm-api-tests/vlseg7e16ff.c +++ b/auto-generated/llvm-api-tests/vlseg7e16ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg7e32.c b/auto-generated/llvm-api-tests/vlseg7e32.c index 4854831c9..f2be18bfa 100644 --- a/auto-generated/llvm-api-tests/vlseg7e32.c +++ b/auto-generated/llvm-api-tests/vlseg7e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg7e32ff.c b/auto-generated/llvm-api-tests/vlseg7e32ff.c index ff216e035..ded22607d 100644 --- a/auto-generated/llvm-api-tests/vlseg7e32ff.c +++ b/auto-generated/llvm-api-tests/vlseg7e32ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg7e64.c b/auto-generated/llvm-api-tests/vlseg7e64.c index e9e86c28d..fd21f691d 100644 --- 
a/auto-generated/llvm-api-tests/vlseg7e64.c +++ b/auto-generated/llvm-api-tests/vlseg7e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg7e64ff.c b/auto-generated/llvm-api-tests/vlseg7e64ff.c index 8db61b1d7..d26691383 100644 --- a/auto-generated/llvm-api-tests/vlseg7e64ff.c +++ b/auto-generated/llvm-api-tests/vlseg7e64ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg7e8ff.c b/auto-generated/llvm-api-tests/vlseg7e8ff.c index 8553f8dd7..c61054187 100644 --- a/auto-generated/llvm-api-tests/vlseg7e8ff.c +++ b/auto-generated/llvm-api-tests/vlseg7e8ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg8e16.c b/auto-generated/llvm-api-tests/vlseg8e16.c index 3f4871f13..25a8ba021 100644 --- a/auto-generated/llvm-api-tests/vlseg8e16.c +++ b/auto-generated/llvm-api-tests/vlseg8e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg8e16ff.c b/auto-generated/llvm-api-tests/vlseg8e16ff.c index cb89b04e0..a97949ea8 100644 --- a/auto-generated/llvm-api-tests/vlseg8e16ff.c +++ b/auto-generated/llvm-api-tests/vlseg8e16ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg8e32.c b/auto-generated/llvm-api-tests/vlseg8e32.c index 517199ff5..f6e57b2b3 100644 --- a/auto-generated/llvm-api-tests/vlseg8e32.c +++ b/auto-generated/llvm-api-tests/vlseg8e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg8e32ff.c b/auto-generated/llvm-api-tests/vlseg8e32ff.c index 9d2cead21..5628e7350 100644 --- 
a/auto-generated/llvm-api-tests/vlseg8e32ff.c +++ b/auto-generated/llvm-api-tests/vlseg8e32ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg8e64.c b/auto-generated/llvm-api-tests/vlseg8e64.c index 527a8ae90..f27101799 100644 --- a/auto-generated/llvm-api-tests/vlseg8e64.c +++ b/auto-generated/llvm-api-tests/vlseg8e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg8e64ff.c b/auto-generated/llvm-api-tests/vlseg8e64ff.c index c75296ca4..ab17e31e1 100644 --- a/auto-generated/llvm-api-tests/vlseg8e64ff.c +++ b/auto-generated/llvm-api-tests/vlseg8e64ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlseg8e8ff.c b/auto-generated/llvm-api-tests/vlseg8e8ff.c index 9d34f4b36..73f4a5256 100644 --- a/auto-generated/llvm-api-tests/vlseg8e8ff.c +++ b/auto-generated/llvm-api-tests/vlseg8e8ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg2e16.c b/auto-generated/llvm-api-tests/vlsseg2e16.c index 874697cec..977ce06a9 100644 --- a/auto-generated/llvm-api-tests/vlsseg2e16.c +++ b/auto-generated/llvm-api-tests/vlsseg2e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg2e32.c b/auto-generated/llvm-api-tests/vlsseg2e32.c index 4c1b9d906..0dad46d52 100644 --- a/auto-generated/llvm-api-tests/vlsseg2e32.c +++ b/auto-generated/llvm-api-tests/vlsseg2e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg2e64.c b/auto-generated/llvm-api-tests/vlsseg2e64.c index 6fcce87ae..2a0d9bdc8 100644 --- 
a/auto-generated/llvm-api-tests/vlsseg2e64.c +++ b/auto-generated/llvm-api-tests/vlsseg2e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg3e16.c b/auto-generated/llvm-api-tests/vlsseg3e16.c index 6f350d383..0367c073e 100644 --- a/auto-generated/llvm-api-tests/vlsseg3e16.c +++ b/auto-generated/llvm-api-tests/vlsseg3e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg3e32.c b/auto-generated/llvm-api-tests/vlsseg3e32.c index 4bff8b886..adcc52ab0 100644 --- a/auto-generated/llvm-api-tests/vlsseg3e32.c +++ b/auto-generated/llvm-api-tests/vlsseg3e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg3e64.c b/auto-generated/llvm-api-tests/vlsseg3e64.c index 5f2283c60..88feb12c0 100644 --- a/auto-generated/llvm-api-tests/vlsseg3e64.c +++ b/auto-generated/llvm-api-tests/vlsseg3e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg4e16.c b/auto-generated/llvm-api-tests/vlsseg4e16.c index e2d8a4ab4..09b4c4b4e 100644 --- a/auto-generated/llvm-api-tests/vlsseg4e16.c +++ b/auto-generated/llvm-api-tests/vlsseg4e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg4e32.c b/auto-generated/llvm-api-tests/vlsseg4e32.c index 472380cb7..a37c602b4 100644 --- a/auto-generated/llvm-api-tests/vlsseg4e32.c +++ b/auto-generated/llvm-api-tests/vlsseg4e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg4e64.c b/auto-generated/llvm-api-tests/vlsseg4e64.c index 12cffbdb4..8e628bf23 100644 --- a/auto-generated/llvm-api-tests/vlsseg4e64.c 
+++ b/auto-generated/llvm-api-tests/vlsseg4e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg5e16.c b/auto-generated/llvm-api-tests/vlsseg5e16.c index 70c7565c9..d09b1115c 100644 --- a/auto-generated/llvm-api-tests/vlsseg5e16.c +++ b/auto-generated/llvm-api-tests/vlsseg5e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg5e32.c b/auto-generated/llvm-api-tests/vlsseg5e32.c index 41ecab1ac..291dd473e 100644 --- a/auto-generated/llvm-api-tests/vlsseg5e32.c +++ b/auto-generated/llvm-api-tests/vlsseg5e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg5e64.c b/auto-generated/llvm-api-tests/vlsseg5e64.c index 8d798bb84..9b3bf3900 100644 --- a/auto-generated/llvm-api-tests/vlsseg5e64.c +++ b/auto-generated/llvm-api-tests/vlsseg5e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg6e16.c b/auto-generated/llvm-api-tests/vlsseg6e16.c index e8723bbe7..a9c7a1ee8 100644 --- a/auto-generated/llvm-api-tests/vlsseg6e16.c +++ b/auto-generated/llvm-api-tests/vlsseg6e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg6e32.c b/auto-generated/llvm-api-tests/vlsseg6e32.c index ef4581d9a..921f006fb 100644 --- a/auto-generated/llvm-api-tests/vlsseg6e32.c +++ b/auto-generated/llvm-api-tests/vlsseg6e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg6e64.c b/auto-generated/llvm-api-tests/vlsseg6e64.c index 37362e168..83da13a50 100644 --- a/auto-generated/llvm-api-tests/vlsseg6e64.c +++ 
b/auto-generated/llvm-api-tests/vlsseg6e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg7e16.c b/auto-generated/llvm-api-tests/vlsseg7e16.c index 6f750f528..f8c3d9c9a 100644 --- a/auto-generated/llvm-api-tests/vlsseg7e16.c +++ b/auto-generated/llvm-api-tests/vlsseg7e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg7e32.c b/auto-generated/llvm-api-tests/vlsseg7e32.c index 71571b5df..59304da74 100644 --- a/auto-generated/llvm-api-tests/vlsseg7e32.c +++ b/auto-generated/llvm-api-tests/vlsseg7e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg7e64.c b/auto-generated/llvm-api-tests/vlsseg7e64.c index d135a3ae2..505d986d4 100644 --- a/auto-generated/llvm-api-tests/vlsseg7e64.c +++ b/auto-generated/llvm-api-tests/vlsseg7e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg8e16.c b/auto-generated/llvm-api-tests/vlsseg8e16.c index 94f212485..d5cc4e324 100644 --- a/auto-generated/llvm-api-tests/vlsseg8e16.c +++ b/auto-generated/llvm-api-tests/vlsseg8e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg8e32.c b/auto-generated/llvm-api-tests/vlsseg8e32.c index e28628ddd..fe9045a17 100644 --- a/auto-generated/llvm-api-tests/vlsseg8e32.c +++ b/auto-generated/llvm-api-tests/vlsseg8e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vlsseg8e64.c b/auto-generated/llvm-api-tests/vlsseg8e64.c index c683ce2ae..30d5cc318 100644 --- a/auto-generated/llvm-api-tests/vlsseg8e64.c +++ b/auto-generated/llvm-api-tests/vlsseg8e64.c 
@@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxei16.c b/auto-generated/llvm-api-tests/vluxei16.c index b4d820ff5..6b281a470 100644 --- a/auto-generated/llvm-api-tests/vluxei16.c +++ b/auto-generated/llvm-api-tests/vluxei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxei32.c b/auto-generated/llvm-api-tests/vluxei32.c index 12ef078aa..bde2bc58a 100644 --- a/auto-generated/llvm-api-tests/vluxei32.c +++ b/auto-generated/llvm-api-tests/vluxei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxei64.c b/auto-generated/llvm-api-tests/vluxei64.c index b4ab9a9af..5a41f809e 100644 --- a/auto-generated/llvm-api-tests/vluxei64.c +++ b/auto-generated/llvm-api-tests/vluxei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxei8.c b/auto-generated/llvm-api-tests/vluxei8.c index 8e96e51d0..9b50256f8 100644 --- a/auto-generated/llvm-api-tests/vluxei8.c +++ b/auto-generated/llvm-api-tests/vluxei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg2ei16.c b/auto-generated/llvm-api-tests/vluxseg2ei16.c index 31ba6d213..b5ae65212 100644 --- a/auto-generated/llvm-api-tests/vluxseg2ei16.c +++ b/auto-generated/llvm-api-tests/vluxseg2ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg2ei32.c b/auto-generated/llvm-api-tests/vluxseg2ei32.c index be01c09c0..af09beb50 100644 --- a/auto-generated/llvm-api-tests/vluxseg2ei32.c +++ b/auto-generated/llvm-api-tests/vluxseg2ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: 
%clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg2ei64.c b/auto-generated/llvm-api-tests/vluxseg2ei64.c index 429a7a6d8..d8713eadb 100644 --- a/auto-generated/llvm-api-tests/vluxseg2ei64.c +++ b/auto-generated/llvm-api-tests/vluxseg2ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg2ei8.c b/auto-generated/llvm-api-tests/vluxseg2ei8.c index ea611470e..7d961c616 100644 --- a/auto-generated/llvm-api-tests/vluxseg2ei8.c +++ b/auto-generated/llvm-api-tests/vluxseg2ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg3ei16.c b/auto-generated/llvm-api-tests/vluxseg3ei16.c index a7c14b660..dc9562f47 100644 --- a/auto-generated/llvm-api-tests/vluxseg3ei16.c +++ b/auto-generated/llvm-api-tests/vluxseg3ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg3ei32.c b/auto-generated/llvm-api-tests/vluxseg3ei32.c index bfe9cb7c2..65d033a2c 100644 --- a/auto-generated/llvm-api-tests/vluxseg3ei32.c +++ b/auto-generated/llvm-api-tests/vluxseg3ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg3ei64.c b/auto-generated/llvm-api-tests/vluxseg3ei64.c index 3b6170f4e..70c6be6c2 100644 --- a/auto-generated/llvm-api-tests/vluxseg3ei64.c +++ b/auto-generated/llvm-api-tests/vluxseg3ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg3ei8.c b/auto-generated/llvm-api-tests/vluxseg3ei8.c index 7bb1b24c2..4cf9e703f 100644 --- a/auto-generated/llvm-api-tests/vluxseg3ei8.c +++ b/auto-generated/llvm-api-tests/vluxseg3ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: 
%clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg4ei16.c b/auto-generated/llvm-api-tests/vluxseg4ei16.c index 91d9b36c3..d65918fe7 100644 --- a/auto-generated/llvm-api-tests/vluxseg4ei16.c +++ b/auto-generated/llvm-api-tests/vluxseg4ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg4ei32.c b/auto-generated/llvm-api-tests/vluxseg4ei32.c index a5bd87d38..3d14e39be 100644 --- a/auto-generated/llvm-api-tests/vluxseg4ei32.c +++ b/auto-generated/llvm-api-tests/vluxseg4ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg4ei64.c b/auto-generated/llvm-api-tests/vluxseg4ei64.c index 6fbe922cf..e55084b40 100644 --- a/auto-generated/llvm-api-tests/vluxseg4ei64.c +++ b/auto-generated/llvm-api-tests/vluxseg4ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg4ei8.c b/auto-generated/llvm-api-tests/vluxseg4ei8.c index db4ed1b5a..a7f3dadce 100644 --- a/auto-generated/llvm-api-tests/vluxseg4ei8.c +++ b/auto-generated/llvm-api-tests/vluxseg4ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg5ei16.c b/auto-generated/llvm-api-tests/vluxseg5ei16.c index 9058c4d9c..7edfef24a 100644 --- a/auto-generated/llvm-api-tests/vluxseg5ei16.c +++ b/auto-generated/llvm-api-tests/vluxseg5ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg5ei32.c b/auto-generated/llvm-api-tests/vluxseg5ei32.c index cd6040fb4..b0d47ac20 100644 --- a/auto-generated/llvm-api-tests/vluxseg5ei32.c +++ b/auto-generated/llvm-api-tests/vluxseg5ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: 
%clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg5ei64.c b/auto-generated/llvm-api-tests/vluxseg5ei64.c index 49e878f72..2344d95c1 100644 --- a/auto-generated/llvm-api-tests/vluxseg5ei64.c +++ b/auto-generated/llvm-api-tests/vluxseg5ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg5ei8.c b/auto-generated/llvm-api-tests/vluxseg5ei8.c index bb646f12c..b2c831c78 100644 --- a/auto-generated/llvm-api-tests/vluxseg5ei8.c +++ b/auto-generated/llvm-api-tests/vluxseg5ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg6ei16.c b/auto-generated/llvm-api-tests/vluxseg6ei16.c index d5b9289da..6a0dedfac 100644 --- a/auto-generated/llvm-api-tests/vluxseg6ei16.c +++ b/auto-generated/llvm-api-tests/vluxseg6ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg6ei32.c b/auto-generated/llvm-api-tests/vluxseg6ei32.c index 3d83184a6..f4c89dba4 100644 --- a/auto-generated/llvm-api-tests/vluxseg6ei32.c +++ b/auto-generated/llvm-api-tests/vluxseg6ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg6ei64.c b/auto-generated/llvm-api-tests/vluxseg6ei64.c index b51c09de4..086bd24fb 100644 --- a/auto-generated/llvm-api-tests/vluxseg6ei64.c +++ b/auto-generated/llvm-api-tests/vluxseg6ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg6ei8.c b/auto-generated/llvm-api-tests/vluxseg6ei8.c index 658cbd0bf..fc111a312 100644 --- a/auto-generated/llvm-api-tests/vluxseg6ei8.c +++ b/auto-generated/llvm-api-tests/vluxseg6ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: 
%clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg7ei16.c b/auto-generated/llvm-api-tests/vluxseg7ei16.c index 468bce389..41e131ad0 100644 --- a/auto-generated/llvm-api-tests/vluxseg7ei16.c +++ b/auto-generated/llvm-api-tests/vluxseg7ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg7ei32.c b/auto-generated/llvm-api-tests/vluxseg7ei32.c index b43d48ecf..f4b52a4f8 100644 --- a/auto-generated/llvm-api-tests/vluxseg7ei32.c +++ b/auto-generated/llvm-api-tests/vluxseg7ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg7ei64.c b/auto-generated/llvm-api-tests/vluxseg7ei64.c index c92945c99..ad8d20a76 100644 --- a/auto-generated/llvm-api-tests/vluxseg7ei64.c +++ b/auto-generated/llvm-api-tests/vluxseg7ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg7ei8.c b/auto-generated/llvm-api-tests/vluxseg7ei8.c index ba0d86f3e..9769d1f41 100644 --- a/auto-generated/llvm-api-tests/vluxseg7ei8.c +++ b/auto-generated/llvm-api-tests/vluxseg7ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg8ei16.c b/auto-generated/llvm-api-tests/vluxseg8ei16.c index 44908b9a1..582ec4146 100644 --- a/auto-generated/llvm-api-tests/vluxseg8ei16.c +++ b/auto-generated/llvm-api-tests/vluxseg8ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg8ei32.c b/auto-generated/llvm-api-tests/vluxseg8ei32.c index a4b1219be..e088d1b62 100644 --- a/auto-generated/llvm-api-tests/vluxseg8ei32.c +++ b/auto-generated/llvm-api-tests/vluxseg8ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: 
%clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg8ei64.c b/auto-generated/llvm-api-tests/vluxseg8ei64.c index 89d51431f..47aa7ae28 100644 --- a/auto-generated/llvm-api-tests/vluxseg8ei64.c +++ b/auto-generated/llvm-api-tests/vluxseg8ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vluxseg8ei8.c b/auto-generated/llvm-api-tests/vluxseg8ei8.c index b15309835..690158f00 100644 --- a/auto-generated/llvm-api-tests/vluxseg8ei8.c +++ b/auto-generated/llvm-api-tests/vluxseg8ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vmacc.c b/auto-generated/llvm-api-tests/vmacc.c index 5c9b9dba9..8a1e72d22 100644 --- a/auto-generated/llvm-api-tests/vmacc.c +++ b/auto-generated/llvm-api-tests/vmacc.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vmadd.c b/auto-generated/llvm-api-tests/vmadd.c index dee28e0f2..9639cbdab 100644 --- a/auto-generated/llvm-api-tests/vmadd.c +++ b/auto-generated/llvm-api-tests/vmadd.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vmerge.c b/auto-generated/llvm-api-tests/vmerge.c index 39511afe8..2d4ed4402 100644 --- a/auto-generated/llvm-api-tests/vmerge.c +++ b/auto-generated/llvm-api-tests/vmerge.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vmfeq.c b/auto-generated/llvm-api-tests/vmfeq.c index 326530d06..791bf1a4a 100644 --- a/auto-generated/llvm-api-tests/vmfeq.c +++ b/auto-generated/llvm-api-tests/vmfeq.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature 
+experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vmfge.c b/auto-generated/llvm-api-tests/vmfge.c index bff02eedf..adb686e36 100644 --- a/auto-generated/llvm-api-tests/vmfge.c +++ b/auto-generated/llvm-api-tests/vmfge.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vmfgt.c b/auto-generated/llvm-api-tests/vmfgt.c index 86f04c353..84836951d 100644 --- a/auto-generated/llvm-api-tests/vmfgt.c +++ b/auto-generated/llvm-api-tests/vmfgt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vmfle.c b/auto-generated/llvm-api-tests/vmfle.c index 36d68ca41..f90ae9d3d 100644 --- a/auto-generated/llvm-api-tests/vmfle.c +++ b/auto-generated/llvm-api-tests/vmfle.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vmflt.c b/auto-generated/llvm-api-tests/vmflt.c index 0b0604928..95863ba18 100644 --- a/auto-generated/llvm-api-tests/vmflt.c +++ b/auto-generated/llvm-api-tests/vmflt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vmfne.c b/auto-generated/llvm-api-tests/vmfne.c index 786f58720..0881e5478 100644 --- a/auto-generated/llvm-api-tests/vmfne.c +++ b/auto-generated/llvm-api-tests/vmfne.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vmmv.c b/auto-generated/llvm-api-tests/vmmv.c index c786f72ff..7c8f669ed 100644 --- a/auto-generated/llvm-api-tests/vmmv.c +++ b/auto-generated/llvm-api-tests/vmmv.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck 
--check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vmseq.c b/auto-generated/llvm-api-tests/vmseq.c
index bc722ffdc..3abc8879a 100644
--- a/auto-generated/llvm-api-tests/vmseq.c
+++ b/auto-generated/llvm-api-tests/vmseq.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vmsge.c b/auto-generated/llvm-api-tests/vmsge.c
index 0d400280f..eeb06b8f2 100644
--- a/auto-generated/llvm-api-tests/vmsge.c
+++ b/auto-generated/llvm-api-tests/vmsge.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vmsgeu.c b/auto-generated/llvm-api-tests/vmsgeu.c
index 07fd78ee8..c4ccd06b9 100644
--- a/auto-generated/llvm-api-tests/vmsgeu.c
+++ b/auto-generated/llvm-api-tests/vmsgeu.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vmsgt.c b/auto-generated/llvm-api-tests/vmsgt.c
index 8ea07b8d5..bdba1b550 100644
--- a/auto-generated/llvm-api-tests/vmsgt.c
+++ b/auto-generated/llvm-api-tests/vmsgt.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vmsgtu.c b/auto-generated/llvm-api-tests/vmsgtu.c
index 816d16f7d..af7283923 100644
--- a/auto-generated/llvm-api-tests/vmsgtu.c
+++ b/auto-generated/llvm-api-tests/vmsgtu.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vmsle.c b/auto-generated/llvm-api-tests/vmsle.c
index 2da595dac..955a81cfa 100644
--- a/auto-generated/llvm-api-tests/vmsle.c
+++ b/auto-generated/llvm-api-tests/vmsle.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vmsleu.c b/auto-generated/llvm-api-tests/vmsleu.c
index 96bac8994..e46f71abc 100644
--- a/auto-generated/llvm-api-tests/vmsleu.c
+++ b/auto-generated/llvm-api-tests/vmsleu.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vmslt.c b/auto-generated/llvm-api-tests/vmslt.c
index 03f06d12e..9cd741d10 100644
--- a/auto-generated/llvm-api-tests/vmslt.c
+++ b/auto-generated/llvm-api-tests/vmslt.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vmsltu.c b/auto-generated/llvm-api-tests/vmsltu.c
index dc45d89dc..c85d44dc5 100644
--- a/auto-generated/llvm-api-tests/vmsltu.c
+++ b/auto-generated/llvm-api-tests/vmsltu.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vmsne.c b/auto-generated/llvm-api-tests/vmsne.c
index e25d3da80..a9bad979c 100644
--- a/auto-generated/llvm-api-tests/vmsne.c
+++ b/auto-generated/llvm-api-tests/vmsne.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vmv.c b/auto-generated/llvm-api-tests/vmv.c
index bad217d99..f6e9dbf6c 100644
--- a/auto-generated/llvm-api-tests/vmv.c
+++ b/auto-generated/llvm-api-tests/vmv.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vneg.c b/auto-generated/llvm-api-tests/vneg.c
index 9258fda61..c7d1620ba 100644
--- a/auto-generated/llvm-api-tests/vneg.c
+++ b/auto-generated/llvm-api-tests/vneg.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vnmsac.c b/auto-generated/llvm-api-tests/vnmsac.c
index 341eb9c12..22d5923a4 100644
--- a/auto-generated/llvm-api-tests/vnmsac.c
+++ b/auto-generated/llvm-api-tests/vnmsac.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vnmsub.c b/auto-generated/llvm-api-tests/vnmsub.c
index c3cabeeab..f1c18ebd1 100644
--- a/auto-generated/llvm-api-tests/vnmsub.c
+++ b/auto-generated/llvm-api-tests/vnmsub.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vreinterpret.c b/auto-generated/llvm-api-tests/vreinterpret.c
index 13a9eabd3..9a79e6875 100644
--- a/auto-generated/llvm-api-tests/vreinterpret.c
+++ b/auto-generated/llvm-api-tests/vreinterpret.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vrgather.c b/auto-generated/llvm-api-tests/vrgather.c
index c2f4d4083..4aff52536 100644
--- a/auto-generated/llvm-api-tests/vrgather.c
+++ b/auto-generated/llvm-api-tests/vrgather.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vrgatherei16.c b/auto-generated/llvm-api-tests/vrgatherei16.c
index 4cd08c1d6..9ca522d0a 100644
--- a/auto-generated/llvm-api-tests/vrgatherei16.c
+++ b/auto-generated/llvm-api-tests/vrgatherei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vse16.c b/auto-generated/llvm-api-tests/vse16.c
index 7d2c51cbd..9cf764b55 100644
--- a/auto-generated/llvm-api-tests/vse16.c
+++ b/auto-generated/llvm-api-tests/vse16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vse32.c b/auto-generated/llvm-api-tests/vse32.c
index 2f8cb09b7..9bca430fe 100644
--- a/auto-generated/llvm-api-tests/vse32.c
+++ b/auto-generated/llvm-api-tests/vse32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vse64.c b/auto-generated/llvm-api-tests/vse64.c
index d3fcf5245..2e2abd920 100644
--- a/auto-generated/llvm-api-tests/vse64.c
+++ b/auto-generated/llvm-api-tests/vse64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vset.c b/auto-generated/llvm-api-tests/vset.c
index 395a8b4f5..153e2df40 100644
--- a/auto-generated/llvm-api-tests/vset.c
+++ b/auto-generated/llvm-api-tests/vset.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vslidedown.c b/auto-generated/llvm-api-tests/vslidedown.c
index 367703143..b90c8a496 100644
--- a/auto-generated/llvm-api-tests/vslidedown.c
+++ b/auto-generated/llvm-api-tests/vslidedown.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vslideup.c b/auto-generated/llvm-api-tests/vslideup.c
index 9b15794ac..881f40e7f 100644
--- a/auto-generated/llvm-api-tests/vslideup.c
+++ b/auto-generated/llvm-api-tests/vslideup.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxei16.c b/auto-generated/llvm-api-tests/vsoxei16.c
index 27800c50e..68516a6ac 100644
--- a/auto-generated/llvm-api-tests/vsoxei16.c
+++ b/auto-generated/llvm-api-tests/vsoxei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxei32.c b/auto-generated/llvm-api-tests/vsoxei32.c
index b703ed6e7..9278c3de8 100644
--- a/auto-generated/llvm-api-tests/vsoxei32.c
+++ b/auto-generated/llvm-api-tests/vsoxei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxei64.c b/auto-generated/llvm-api-tests/vsoxei64.c
index 657a1d335..02fa5dfd5 100644
--- a/auto-generated/llvm-api-tests/vsoxei64.c
+++ b/auto-generated/llvm-api-tests/vsoxei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxei8.c b/auto-generated/llvm-api-tests/vsoxei8.c
index 3abc1c848..9e7485b89 100644
--- a/auto-generated/llvm-api-tests/vsoxei8.c
+++ b/auto-generated/llvm-api-tests/vsoxei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg2ei16.c b/auto-generated/llvm-api-tests/vsoxseg2ei16.c
index ea3d6a207..e311e05c4 100644
--- a/auto-generated/llvm-api-tests/vsoxseg2ei16.c
+++ b/auto-generated/llvm-api-tests/vsoxseg2ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg2ei32.c b/auto-generated/llvm-api-tests/vsoxseg2ei32.c
index abe4f11c2..389592f12 100644
--- a/auto-generated/llvm-api-tests/vsoxseg2ei32.c
+++ b/auto-generated/llvm-api-tests/vsoxseg2ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg2ei64.c b/auto-generated/llvm-api-tests/vsoxseg2ei64.c
index 2e17f7b09..699bb592d 100644
--- a/auto-generated/llvm-api-tests/vsoxseg2ei64.c
+++ b/auto-generated/llvm-api-tests/vsoxseg2ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg2ei8.c b/auto-generated/llvm-api-tests/vsoxseg2ei8.c
index 42555f84f..c62efac46 100644
--- a/auto-generated/llvm-api-tests/vsoxseg2ei8.c
+++ b/auto-generated/llvm-api-tests/vsoxseg2ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg3ei16.c b/auto-generated/llvm-api-tests/vsoxseg3ei16.c
index a7822cc70..bb00635a2 100644
--- a/auto-generated/llvm-api-tests/vsoxseg3ei16.c
+++ b/auto-generated/llvm-api-tests/vsoxseg3ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg3ei32.c b/auto-generated/llvm-api-tests/vsoxseg3ei32.c
index 5b2284115..1e5886b31 100644
--- a/auto-generated/llvm-api-tests/vsoxseg3ei32.c
+++ b/auto-generated/llvm-api-tests/vsoxseg3ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg3ei64.c b/auto-generated/llvm-api-tests/vsoxseg3ei64.c
index 585aac02c..200bd7dd7 100644
--- a/auto-generated/llvm-api-tests/vsoxseg3ei64.c
+++ b/auto-generated/llvm-api-tests/vsoxseg3ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg3ei8.c b/auto-generated/llvm-api-tests/vsoxseg3ei8.c
index a3cc915ec..b661e57ed 100644
--- a/auto-generated/llvm-api-tests/vsoxseg3ei8.c
+++ b/auto-generated/llvm-api-tests/vsoxseg3ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg4ei16.c b/auto-generated/llvm-api-tests/vsoxseg4ei16.c
index ab59b875a..1fb523444 100644
--- a/auto-generated/llvm-api-tests/vsoxseg4ei16.c
+++ b/auto-generated/llvm-api-tests/vsoxseg4ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg4ei32.c b/auto-generated/llvm-api-tests/vsoxseg4ei32.c
index dc6b8b51d..1790be30e 100644
--- a/auto-generated/llvm-api-tests/vsoxseg4ei32.c
+++ b/auto-generated/llvm-api-tests/vsoxseg4ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg4ei64.c b/auto-generated/llvm-api-tests/vsoxseg4ei64.c
index 005f96acc..11cfc3c5c 100644
--- a/auto-generated/llvm-api-tests/vsoxseg4ei64.c
+++ b/auto-generated/llvm-api-tests/vsoxseg4ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg4ei8.c b/auto-generated/llvm-api-tests/vsoxseg4ei8.c
index 7de140742..382a5f493 100644
--- a/auto-generated/llvm-api-tests/vsoxseg4ei8.c
+++ b/auto-generated/llvm-api-tests/vsoxseg4ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg5ei16.c b/auto-generated/llvm-api-tests/vsoxseg5ei16.c
index 14e190942..fe11d58b0 100644
--- a/auto-generated/llvm-api-tests/vsoxseg5ei16.c
+++ b/auto-generated/llvm-api-tests/vsoxseg5ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg5ei32.c b/auto-generated/llvm-api-tests/vsoxseg5ei32.c
index ad7af845a..06ca422ee 100644
--- a/auto-generated/llvm-api-tests/vsoxseg5ei32.c
+++ b/auto-generated/llvm-api-tests/vsoxseg5ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg5ei64.c b/auto-generated/llvm-api-tests/vsoxseg5ei64.c
index f9c22a039..d23cce915 100644
--- a/auto-generated/llvm-api-tests/vsoxseg5ei64.c
+++ b/auto-generated/llvm-api-tests/vsoxseg5ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg5ei8.c b/auto-generated/llvm-api-tests/vsoxseg5ei8.c
index 81a77c392..72bcb4b17 100644
--- a/auto-generated/llvm-api-tests/vsoxseg5ei8.c
+++ b/auto-generated/llvm-api-tests/vsoxseg5ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg6ei16.c b/auto-generated/llvm-api-tests/vsoxseg6ei16.c
index 1aa73b092..0b4a88b7d 100644
--- a/auto-generated/llvm-api-tests/vsoxseg6ei16.c
+++ b/auto-generated/llvm-api-tests/vsoxseg6ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg6ei32.c b/auto-generated/llvm-api-tests/vsoxseg6ei32.c
index 7f4e8f47f..ce342bb33 100644
--- a/auto-generated/llvm-api-tests/vsoxseg6ei32.c
+++ b/auto-generated/llvm-api-tests/vsoxseg6ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg6ei64.c b/auto-generated/llvm-api-tests/vsoxseg6ei64.c
index 3ed2522a7..6becd3572 100644
--- a/auto-generated/llvm-api-tests/vsoxseg6ei64.c
+++ b/auto-generated/llvm-api-tests/vsoxseg6ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg6ei8.c b/auto-generated/llvm-api-tests/vsoxseg6ei8.c
index 0b4cd1ef4..dc6929aae 100644
--- a/auto-generated/llvm-api-tests/vsoxseg6ei8.c
+++ b/auto-generated/llvm-api-tests/vsoxseg6ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg7ei16.c b/auto-generated/llvm-api-tests/vsoxseg7ei16.c
index de57c3f10..07735e79e 100644
--- a/auto-generated/llvm-api-tests/vsoxseg7ei16.c
+++ b/auto-generated/llvm-api-tests/vsoxseg7ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg7ei32.c b/auto-generated/llvm-api-tests/vsoxseg7ei32.c
index 5873913fe..ae4cc6404 100644
--- a/auto-generated/llvm-api-tests/vsoxseg7ei32.c
+++ b/auto-generated/llvm-api-tests/vsoxseg7ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg7ei64.c b/auto-generated/llvm-api-tests/vsoxseg7ei64.c
index 4fb97bee8..c656eb65c 100644
--- a/auto-generated/llvm-api-tests/vsoxseg7ei64.c
+++ b/auto-generated/llvm-api-tests/vsoxseg7ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg7ei8.c b/auto-generated/llvm-api-tests/vsoxseg7ei8.c
index 46d3f71a8..20c1ae546 100644
--- a/auto-generated/llvm-api-tests/vsoxseg7ei8.c
+++ b/auto-generated/llvm-api-tests/vsoxseg7ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg8ei16.c b/auto-generated/llvm-api-tests/vsoxseg8ei16.c
index b27b5dc6b..b91471794 100644
--- a/auto-generated/llvm-api-tests/vsoxseg8ei16.c
+++ b/auto-generated/llvm-api-tests/vsoxseg8ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg8ei32.c b/auto-generated/llvm-api-tests/vsoxseg8ei32.c
index 19eb01e8e..9fe682dd8 100644
--- a/auto-generated/llvm-api-tests/vsoxseg8ei32.c
+++ b/auto-generated/llvm-api-tests/vsoxseg8ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg8ei64.c b/auto-generated/llvm-api-tests/vsoxseg8ei64.c
index 59baadc85..6282ff055 100644
--- a/auto-generated/llvm-api-tests/vsoxseg8ei64.c
+++ b/auto-generated/llvm-api-tests/vsoxseg8ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsoxseg8ei8.c b/auto-generated/llvm-api-tests/vsoxseg8ei8.c
index 12b437606..2d1812b6c 100644
--- a/auto-generated/llvm-api-tests/vsoxseg8ei8.c
+++ b/auto-generated/llvm-api-tests/vsoxseg8ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsse16.c b/auto-generated/llvm-api-tests/vsse16.c
index 60fa72fe0..61be15df9 100644
--- a/auto-generated/llvm-api-tests/vsse16.c
+++ b/auto-generated/llvm-api-tests/vsse16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsse32.c b/auto-generated/llvm-api-tests/vsse32.c
index efe3cc2dc..3ac9c14c0 100644
--- a/auto-generated/llvm-api-tests/vsse32.c
+++ b/auto-generated/llvm-api-tests/vsse32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsse64.c b/auto-generated/llvm-api-tests/vsse64.c
index 744cc61bc..fa6d29fdf 100644
--- a/auto-generated/llvm-api-tests/vsse64.c
+++ b/auto-generated/llvm-api-tests/vsse64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg2e16.c b/auto-generated/llvm-api-tests/vsseg2e16.c
index 944f285d9..a3f9455af 100644
--- a/auto-generated/llvm-api-tests/vsseg2e16.c
+++ b/auto-generated/llvm-api-tests/vsseg2e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg2e32.c b/auto-generated/llvm-api-tests/vsseg2e32.c
index 8a1b08f87..9e01865e1 100644
--- a/auto-generated/llvm-api-tests/vsseg2e32.c
+++ b/auto-generated/llvm-api-tests/vsseg2e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg2e64.c b/auto-generated/llvm-api-tests/vsseg2e64.c
index d873ca18c..7c08081af 100644
--- a/auto-generated/llvm-api-tests/vsseg2e64.c
+++ b/auto-generated/llvm-api-tests/vsseg2e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg3e16.c b/auto-generated/llvm-api-tests/vsseg3e16.c
index 51094bd6a..4fa80c344 100644
--- a/auto-generated/llvm-api-tests/vsseg3e16.c
+++ b/auto-generated/llvm-api-tests/vsseg3e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg3e32.c b/auto-generated/llvm-api-tests/vsseg3e32.c
index 7205e0abd..9d42f35a1 100644
--- a/auto-generated/llvm-api-tests/vsseg3e32.c
+++ b/auto-generated/llvm-api-tests/vsseg3e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg3e64.c b/auto-generated/llvm-api-tests/vsseg3e64.c
index b85c443c8..719f8e39d 100644
--- a/auto-generated/llvm-api-tests/vsseg3e64.c
+++ b/auto-generated/llvm-api-tests/vsseg3e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg4e16.c b/auto-generated/llvm-api-tests/vsseg4e16.c
index d8e04d908..1dffa5926 100644
--- a/auto-generated/llvm-api-tests/vsseg4e16.c
+++ b/auto-generated/llvm-api-tests/vsseg4e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg4e32.c b/auto-generated/llvm-api-tests/vsseg4e32.c
index 982a01027..3f736f3bc 100644
--- a/auto-generated/llvm-api-tests/vsseg4e32.c
+++ b/auto-generated/llvm-api-tests/vsseg4e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg4e64.c b/auto-generated/llvm-api-tests/vsseg4e64.c
index b68040ff1..2b5b81b14 100644
--- a/auto-generated/llvm-api-tests/vsseg4e64.c
+++ b/auto-generated/llvm-api-tests/vsseg4e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg5e16.c b/auto-generated/llvm-api-tests/vsseg5e16.c
index e0741b425..c2c625cb4 100644
--- a/auto-generated/llvm-api-tests/vsseg5e16.c
+++ b/auto-generated/llvm-api-tests/vsseg5e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg5e32.c b/auto-generated/llvm-api-tests/vsseg5e32.c
index 017ba99cf..31f343ce2 100644
--- a/auto-generated/llvm-api-tests/vsseg5e32.c
+++ b/auto-generated/llvm-api-tests/vsseg5e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg5e64.c b/auto-generated/llvm-api-tests/vsseg5e64.c
index a0bb07a05..b3ffe050b 100644
--- a/auto-generated/llvm-api-tests/vsseg5e64.c
+++ b/auto-generated/llvm-api-tests/vsseg5e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg6e16.c b/auto-generated/llvm-api-tests/vsseg6e16.c
index 0cad15508..30de76dfe 100644
--- a/auto-generated/llvm-api-tests/vsseg6e16.c
+++ b/auto-generated/llvm-api-tests/vsseg6e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg6e32.c b/auto-generated/llvm-api-tests/vsseg6e32.c
index 295229185..1bffcfbe6 100644
--- a/auto-generated/llvm-api-tests/vsseg6e32.c
+++ b/auto-generated/llvm-api-tests/vsseg6e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg6e64.c b/auto-generated/llvm-api-tests/vsseg6e64.c
index cad51d893..9216f5dd6 100644
--- a/auto-generated/llvm-api-tests/vsseg6e64.c
+++ b/auto-generated/llvm-api-tests/vsseg6e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg7e16.c b/auto-generated/llvm-api-tests/vsseg7e16.c
index 7c364febe..97e5a4534 100644
--- a/auto-generated/llvm-api-tests/vsseg7e16.c
+++ b/auto-generated/llvm-api-tests/vsseg7e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg7e32.c b/auto-generated/llvm-api-tests/vsseg7e32.c
index 0d7118392..fd02e9698 100644
--- a/auto-generated/llvm-api-tests/vsseg7e32.c
+++ b/auto-generated/llvm-api-tests/vsseg7e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg7e64.c b/auto-generated/llvm-api-tests/vsseg7e64.c
index 8875951aa..6c4a5eb60 100644
--- a/auto-generated/llvm-api-tests/vsseg7e64.c
+++ b/auto-generated/llvm-api-tests/vsseg7e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg8e16.c b/auto-generated/llvm-api-tests/vsseg8e16.c
index bc1567f8f..4f0a86023 100644
--- a/auto-generated/llvm-api-tests/vsseg8e16.c
+++ b/auto-generated/llvm-api-tests/vsseg8e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg8e32.c b/auto-generated/llvm-api-tests/vsseg8e32.c
index 582690c4b..f07e04721 100644
--- a/auto-generated/llvm-api-tests/vsseg8e32.c
+++ b/auto-generated/llvm-api-tests/vsseg8e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsseg8e64.c b/auto-generated/llvm-api-tests/vsseg8e64.c
index 16011fcde..18a8b2359 100644
--- a/auto-generated/llvm-api-tests/vsseg8e64.c
+++ b/auto-generated/llvm-api-tests/vsseg8e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg2e16.c b/auto-generated/llvm-api-tests/vssseg2e16.c
index 796f1abbb..3ec10f78c 100644
--- a/auto-generated/llvm-api-tests/vssseg2e16.c
+++ b/auto-generated/llvm-api-tests/vssseg2e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg2e32.c b/auto-generated/llvm-api-tests/vssseg2e32.c
index f31322ca8..d8ae0594e 100644
--- a/auto-generated/llvm-api-tests/vssseg2e32.c
+++ b/auto-generated/llvm-api-tests/vssseg2e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg2e64.c b/auto-generated/llvm-api-tests/vssseg2e64.c
index 7ac6fae76..5f4cd4e5d 100644
--- a/auto-generated/llvm-api-tests/vssseg2e64.c
+++ b/auto-generated/llvm-api-tests/vssseg2e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg3e16.c b/auto-generated/llvm-api-tests/vssseg3e16.c
index d79a571c6..2f6c5f594 100644
--- a/auto-generated/llvm-api-tests/vssseg3e16.c
+++ b/auto-generated/llvm-api-tests/vssseg3e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg3e32.c b/auto-generated/llvm-api-tests/vssseg3e32.c
index 97c04158e..81b22c279 100644
--- a/auto-generated/llvm-api-tests/vssseg3e32.c
+++ b/auto-generated/llvm-api-tests/vssseg3e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg3e64.c b/auto-generated/llvm-api-tests/vssseg3e64.c
index 0ed0a191a..28a3e75c6 100644
--- a/auto-generated/llvm-api-tests/vssseg3e64.c
+++ b/auto-generated/llvm-api-tests/vssseg3e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg4e16.c b/auto-generated/llvm-api-tests/vssseg4e16.c
index 4c4fbc9ab..c4a384df3 100644
--- a/auto-generated/llvm-api-tests/vssseg4e16.c
+++ b/auto-generated/llvm-api-tests/vssseg4e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg4e32.c b/auto-generated/llvm-api-tests/vssseg4e32.c
index 62519d6c0..a569e8f94 100644
--- a/auto-generated/llvm-api-tests/vssseg4e32.c
+++ b/auto-generated/llvm-api-tests/vssseg4e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg4e64.c b/auto-generated/llvm-api-tests/vssseg4e64.c
index 94263098f..01100f81f 100644
--- a/auto-generated/llvm-api-tests/vssseg4e64.c
+++ b/auto-generated/llvm-api-tests/vssseg4e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg5e16.c b/auto-generated/llvm-api-tests/vssseg5e16.c
index b545fa5ee..e95ab2fc1 100644
--- a/auto-generated/llvm-api-tests/vssseg5e16.c
+++ b/auto-generated/llvm-api-tests/vssseg5e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg5e32.c b/auto-generated/llvm-api-tests/vssseg5e32.c
index 2a468db9c..6e7d71dd9 100644
--- a/auto-generated/llvm-api-tests/vssseg5e32.c
+++ b/auto-generated/llvm-api-tests/vssseg5e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg5e64.c b/auto-generated/llvm-api-tests/vssseg5e64.c
index b0cf77e09..1e046cae4 100644
--- a/auto-generated/llvm-api-tests/vssseg5e64.c
+++ b/auto-generated/llvm-api-tests/vssseg5e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg6e16.c b/auto-generated/llvm-api-tests/vssseg6e16.c
index 2eebc9a96..5e33a8b2d 100644
--- a/auto-generated/llvm-api-tests/vssseg6e16.c
+++ b/auto-generated/llvm-api-tests/vssseg6e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg6e32.c b/auto-generated/llvm-api-tests/vssseg6e32.c
index 1eaff86c1..42c296f8c 100644
--- a/auto-generated/llvm-api-tests/vssseg6e32.c
+++ b/auto-generated/llvm-api-tests/vssseg6e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg6e64.c b/auto-generated/llvm-api-tests/vssseg6e64.c
index 3500d6ba5..2fc7c5f8b 100644
--- a/auto-generated/llvm-api-tests/vssseg6e64.c
+++ b/auto-generated/llvm-api-tests/vssseg6e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg7e16.c b/auto-generated/llvm-api-tests/vssseg7e16.c
index a81b10970..13481da41 100644
--- a/auto-generated/llvm-api-tests/vssseg7e16.c
+++ b/auto-generated/llvm-api-tests/vssseg7e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg7e32.c b/auto-generated/llvm-api-tests/vssseg7e32.c
index bfd26d03a..925d39d50 100644
--- a/auto-generated/llvm-api-tests/vssseg7e32.c
+++ b/auto-generated/llvm-api-tests/vssseg7e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg7e64.c b/auto-generated/llvm-api-tests/vssseg7e64.c
index 931088aa0..793d50ad7 100644
--- a/auto-generated/llvm-api-tests/vssseg7e64.c
+++ b/auto-generated/llvm-api-tests/vssseg7e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg8e16.c b/auto-generated/llvm-api-tests/vssseg8e16.c
index 6cdce9867..20c5c7839 100644
--- a/auto-generated/llvm-api-tests/vssseg8e16.c
+++ b/auto-generated/llvm-api-tests/vssseg8e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg8e32.c b/auto-generated/llvm-api-tests/vssseg8e32.c
index ba8b10105..d3677da68 100644
--- a/auto-generated/llvm-api-tests/vssseg8e32.c
+++ b/auto-generated/llvm-api-tests/vssseg8e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vssseg8e64.c b/auto-generated/llvm-api-tests/vssseg8e64.c
index 199b8141b..64620fe05 100644
--- a/auto-generated/llvm-api-tests/vssseg8e64.c
+++ b/auto-generated/llvm-api-tests/vssseg8e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxei16.c b/auto-generated/llvm-api-tests/vsuxei16.c
index be0c6234f..ee7cdb5f1 100644
--- a/auto-generated/llvm-api-tests/vsuxei16.c
+++ b/auto-generated/llvm-api-tests/vsuxei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxei32.c b/auto-generated/llvm-api-tests/vsuxei32.c
index 387fd9943..14c07e591 100644
--- a/auto-generated/llvm-api-tests/vsuxei32.c
+++ b/auto-generated/llvm-api-tests/vsuxei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxei64.c b/auto-generated/llvm-api-tests/vsuxei64.c
index 6bcb56a76..f9802c574 100644
--- a/auto-generated/llvm-api-tests/vsuxei64.c
+++ b/auto-generated/llvm-api-tests/vsuxei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxei8.c b/auto-generated/llvm-api-tests/vsuxei8.c
index 2defa57be..4a037d151 100644
--- a/auto-generated/llvm-api-tests/vsuxei8.c
+++ b/auto-generated/llvm-api-tests/vsuxei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg2ei16.c b/auto-generated/llvm-api-tests/vsuxseg2ei16.c
index e9b52cdc9..74f74325c 100644
--- a/auto-generated/llvm-api-tests/vsuxseg2ei16.c
+++ b/auto-generated/llvm-api-tests/vsuxseg2ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg2ei32.c b/auto-generated/llvm-api-tests/vsuxseg2ei32.c
index ac6d49136..b013264e3 100644
--- a/auto-generated/llvm-api-tests/vsuxseg2ei32.c
+++ b/auto-generated/llvm-api-tests/vsuxseg2ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg2ei64.c b/auto-generated/llvm-api-tests/vsuxseg2ei64.c
index 842fc7fca..1266d496e 100644
--- a/auto-generated/llvm-api-tests/vsuxseg2ei64.c
+++ b/auto-generated/llvm-api-tests/vsuxseg2ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg2ei8.c b/auto-generated/llvm-api-tests/vsuxseg2ei8.c
index 597b8f1fd..8e6973d4e 100644
--- a/auto-generated/llvm-api-tests/vsuxseg2ei8.c
+++ b/auto-generated/llvm-api-tests/vsuxseg2ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg3ei16.c b/auto-generated/llvm-api-tests/vsuxseg3ei16.c
index adf729080..e20f72949 100644
--- a/auto-generated/llvm-api-tests/vsuxseg3ei16.c
+++ b/auto-generated/llvm-api-tests/vsuxseg3ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg3ei32.c b/auto-generated/llvm-api-tests/vsuxseg3ei32.c
index ff88b75b1..ec59086c9 100644
--- a/auto-generated/llvm-api-tests/vsuxseg3ei32.c
+++ b/auto-generated/llvm-api-tests/vsuxseg3ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg3ei64.c b/auto-generated/llvm-api-tests/vsuxseg3ei64.c
index 524e5a2b0..bbe214e93 100644
--- a/auto-generated/llvm-api-tests/vsuxseg3ei64.c
+++ b/auto-generated/llvm-api-tests/vsuxseg3ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg3ei8.c b/auto-generated/llvm-api-tests/vsuxseg3ei8.c
index ecb43ceca..d11704c1e 100644
--- a/auto-generated/llvm-api-tests/vsuxseg3ei8.c
+++ b/auto-generated/llvm-api-tests/vsuxseg3ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg4ei16.c b/auto-generated/llvm-api-tests/vsuxseg4ei16.c
index b8ab0207b..7b154a033 100644
--- a/auto-generated/llvm-api-tests/vsuxseg4ei16.c
+++ b/auto-generated/llvm-api-tests/vsuxseg4ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg4ei32.c b/auto-generated/llvm-api-tests/vsuxseg4ei32.c
index a5ad387a0..b3478e0c0 100644
--- a/auto-generated/llvm-api-tests/vsuxseg4ei32.c
+++ b/auto-generated/llvm-api-tests/vsuxseg4ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg4ei64.c b/auto-generated/llvm-api-tests/vsuxseg4ei64.c
index 97fa6e560..a5c5c80c9 100644
--- a/auto-generated/llvm-api-tests/vsuxseg4ei64.c
+++ b/auto-generated/llvm-api-tests/vsuxseg4ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg4ei8.c b/auto-generated/llvm-api-tests/vsuxseg4ei8.c
index 0c3f9f9a8..1cbf72f08 100644
--- a/auto-generated/llvm-api-tests/vsuxseg4ei8.c
+++ b/auto-generated/llvm-api-tests/vsuxseg4ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg5ei16.c b/auto-generated/llvm-api-tests/vsuxseg5ei16.c
index c7d2c130f..37b7d69d1 100644
--- a/auto-generated/llvm-api-tests/vsuxseg5ei16.c
+++ b/auto-generated/llvm-api-tests/vsuxseg5ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg5ei32.c b/auto-generated/llvm-api-tests/vsuxseg5ei32.c
index 0757324db..d33446d6a 100644
--- a/auto-generated/llvm-api-tests/vsuxseg5ei32.c
+++ b/auto-generated/llvm-api-tests/vsuxseg5ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg5ei64.c b/auto-generated/llvm-api-tests/vsuxseg5ei64.c
index df3e43802..21f90c1f3 100644
--- a/auto-generated/llvm-api-tests/vsuxseg5ei64.c
+++ b/auto-generated/llvm-api-tests/vsuxseg5ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg5ei8.c b/auto-generated/llvm-api-tests/vsuxseg5ei8.c
index cca7d701c..6a5a0f0cb 100644
--- a/auto-generated/llvm-api-tests/vsuxseg5ei8.c
+++ b/auto-generated/llvm-api-tests/vsuxseg5ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg6ei16.c b/auto-generated/llvm-api-tests/vsuxseg6ei16.c
index af12d0fb1..c7d58f9c3 100644
--- a/auto-generated/llvm-api-tests/vsuxseg6ei16.c
+++ b/auto-generated/llvm-api-tests/vsuxseg6ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-api-tests/vsuxseg6ei32.c b/auto-generated/llvm-api-tests/vsuxseg6ei32.c
index 521498045..c4e14b517 100644
--- a/auto-generated/llvm-api-tests/vsuxseg6ei32.c
+++ b/auto-generated/llvm-api-tests/vsuxseg6ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:
-target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vsuxseg6ei64.c b/auto-generated/llvm-api-tests/vsuxseg6ei64.c index 1186d1f6f..68f194b7e 100644 --- a/auto-generated/llvm-api-tests/vsuxseg6ei64.c +++ b/auto-generated/llvm-api-tests/vsuxseg6ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vsuxseg6ei8.c b/auto-generated/llvm-api-tests/vsuxseg6ei8.c index 2b278a6c5..c58b1adcd 100644 --- a/auto-generated/llvm-api-tests/vsuxseg6ei8.c +++ b/auto-generated/llvm-api-tests/vsuxseg6ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vsuxseg7ei16.c b/auto-generated/llvm-api-tests/vsuxseg7ei16.c index 57301c525..696d11270 100644 --- a/auto-generated/llvm-api-tests/vsuxseg7ei16.c +++ b/auto-generated/llvm-api-tests/vsuxseg7ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vsuxseg7ei32.c b/auto-generated/llvm-api-tests/vsuxseg7ei32.c index d7f93299a..e1016eeb9 100644 --- a/auto-generated/llvm-api-tests/vsuxseg7ei32.c +++ b/auto-generated/llvm-api-tests/vsuxseg7ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vsuxseg7ei64.c b/auto-generated/llvm-api-tests/vsuxseg7ei64.c index 002507654..b1074ad21 100644 --- a/auto-generated/llvm-api-tests/vsuxseg7ei64.c +++ b/auto-generated/llvm-api-tests/vsuxseg7ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vsuxseg7ei8.c b/auto-generated/llvm-api-tests/vsuxseg7ei8.c index fa2e8a9d7..b5a9c35a1 100644 --- a/auto-generated/llvm-api-tests/vsuxseg7ei8.c +++ b/auto-generated/llvm-api-tests/vsuxseg7ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: 
-target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vsuxseg8ei16.c b/auto-generated/llvm-api-tests/vsuxseg8ei16.c index 304d2edb1..f085e63ef 100644 --- a/auto-generated/llvm-api-tests/vsuxseg8ei16.c +++ b/auto-generated/llvm-api-tests/vsuxseg8ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vsuxseg8ei32.c b/auto-generated/llvm-api-tests/vsuxseg8ei32.c index fedc7ff6a..e8493ff88 100644 --- a/auto-generated/llvm-api-tests/vsuxseg8ei32.c +++ b/auto-generated/llvm-api-tests/vsuxseg8ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vsuxseg8ei64.c b/auto-generated/llvm-api-tests/vsuxseg8ei64.c index b1fc4b44a..12a3871a3 100644 --- a/auto-generated/llvm-api-tests/vsuxseg8ei64.c +++ b/auto-generated/llvm-api-tests/vsuxseg8ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vsuxseg8ei8.c b/auto-generated/llvm-api-tests/vsuxseg8ei8.c index c257d7173..f891c5967 100644 --- a/auto-generated/llvm-api-tests/vsuxseg8ei8.c +++ b/auto-generated/llvm-api-tests/vsuxseg8ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vundefined.c b/auto-generated/llvm-api-tests/vundefined.c index f7b50440b..a3460c1fd 100644 --- a/auto-generated/llvm-api-tests/vundefined.c +++ b/auto-generated/llvm-api-tests/vundefined.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vwmacc.c b/auto-generated/llvm-api-tests/vwmacc.c index 5a0042ebc..6e2e9d32f 100644 --- a/auto-generated/llvm-api-tests/vwmacc.c +++ b/auto-generated/llvm-api-tests/vwmacc.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature 
+experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vwmaccsu.c b/auto-generated/llvm-api-tests/vwmaccsu.c index c173570f9..3596e6de1 100644 --- a/auto-generated/llvm-api-tests/vwmaccsu.c +++ b/auto-generated/llvm-api-tests/vwmaccsu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vwmaccu.c b/auto-generated/llvm-api-tests/vwmaccu.c index 6a30fe830..63d391940 100644 --- a/auto-generated/llvm-api-tests/vwmaccu.c +++ b/auto-generated/llvm-api-tests/vwmaccu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-api-tests/vwmaccus.c b/auto-generated/llvm-api-tests/vwmaccus.c index 0767086af..4d8024f46 100644 --- a/auto-generated/llvm-api-tests/vwmaccus.c +++ b/auto-generated/llvm-api-tests/vwmaccus.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vcompress.c b/auto-generated/llvm-overloaded-tests/vcompress.c index a4512fb8d..4df3040c4 100644 --- a/auto-generated/llvm-overloaded-tests/vcompress.c +++ b/auto-generated/llvm-overloaded-tests/vcompress.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vcpop.c b/auto-generated/llvm-overloaded-tests/vcpop.c index 1b3ed7bec..398d61799 100644 --- a/auto-generated/llvm-overloaded-tests/vcpop.c +++ b/auto-generated/llvm-overloaded-tests/vcpop.c @@ -1,63 +1,63 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -unsigned int test_vcpop_m_b1(vbool1_t vs2, size_t vl) { +unsigned long test_vcpop_m_b1(vbool1_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b2(vbool2_t vs2, size_t vl) { +unsigned long test_vcpop_m_b2(vbool2_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b4(vbool4_t vs2, size_t vl) { +unsigned long test_vcpop_m_b4(vbool4_t vs2, size_t vl) { return 
__riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b8(vbool8_t vs2, size_t vl) { +unsigned long test_vcpop_m_b8(vbool8_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b16(vbool16_t vs2, size_t vl) { +unsigned long test_vcpop_m_b16(vbool16_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b32(vbool32_t vs2, size_t vl) { +unsigned long test_vcpop_m_b32(vbool32_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b64(vbool64_t vs2, size_t vl) { +unsigned long test_vcpop_m_b64(vbool64_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { +unsigned long test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { +unsigned long test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { +unsigned long test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { +unsigned long test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { +unsigned long test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { +unsigned long test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { +unsigned long test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } diff --git a/auto-generated/llvm-overloaded-tests/vfabs.c b/auto-generated/llvm-overloaded-tests/vfabs.c index a2c4d5442..d6393b0a4 100644 --- a/auto-generated/llvm-overloaded-tests/vfabs.c +++ b/auto-generated/llvm-overloaded-tests/vfabs.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfadd.c b/auto-generated/llvm-overloaded-tests/vfadd.c index bd8693ea3..7ae285433 100644 --- a/auto-generated/llvm-overloaded-tests/vfadd.c +++ b/auto-generated/llvm-overloaded-tests/vfadd.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfclass.c b/auto-generated/llvm-overloaded-tests/vfclass.c index bb71c8010..463428175 100644 --- a/auto-generated/llvm-overloaded-tests/vfclass.c +++ b/auto-generated/llvm-overloaded-tests/vfclass.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ 
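The hunks in this stretch make two kinds of changes. The RUN-line edits are one mechanical substitution, +experimental-zvfh becoming +zvfh, tracking LLVM's promotion of the ratified Zvfh half-precision vector extension out of experimental status. The vcpop.c hunk above additionally widens every return type from unsigned int to unsigned long, and the vfirst.c hunk just below makes the matching signed change from int to long: __riscv_vcpop yields an XLEN-wide element count, and __riscv_vfirst yields the index of the first set mask bit or -1 when none is set, so on riscv64 both results need a 64-bit type. A minimal sketch of the corrected signatures in use; the function names and the choice of a vbool8_t mask are illustrative, not taken from the generated tests:

#include <riscv_vector.h>

// Count the active elements of a mask register. The result is
// XLEN-wide, so it is kept in an unsigned long, not unsigned int.
unsigned long active_count(vbool8_t mask, size_t vl) {
  return __riscv_vcpop(mask, vl);
}

// Find the first active element. __riscv_vfirst returns the index of
// the first set bit, or -1 when no bit is set, hence the signed long.
long first_active(vbool8_t mask, size_t vl) {
  return __riscv_vfirst(mask, vl);
}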
+// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfcvt.c b/auto-generated/llvm-overloaded-tests/vfcvt.c index a3b886e48..1f6a77c10 100644 --- a/auto-generated/llvm-overloaded-tests/vfcvt.c +++ b/auto-generated/llvm-overloaded-tests/vfcvt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfcvt_rtz.c b/auto-generated/llvm-overloaded-tests/vfcvt_rtz.c index 14aa62562..3fc8c5210 100644 --- a/auto-generated/llvm-overloaded-tests/vfcvt_rtz.c +++ b/auto-generated/llvm-overloaded-tests/vfcvt_rtz.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfdiv.c b/auto-generated/llvm-overloaded-tests/vfdiv.c index 540a49829..47341253d 100644 --- a/auto-generated/llvm-overloaded-tests/vfdiv.c +++ b/auto-generated/llvm-overloaded-tests/vfdiv.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfirst.c b/auto-generated/llvm-overloaded-tests/vfirst.c index 481e07716..c8f2b1b12 100644 --- a/auto-generated/llvm-overloaded-tests/vfirst.c +++ b/auto-generated/llvm-overloaded-tests/vfirst.c @@ -5,58 +5,58 @@ #include <riscv_vector.h> -int test_vfirst_m_b1(vbool1_t vs2, size_t vl) { +long test_vfirst_m_b1(vbool1_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b2(vbool2_t vs2, size_t vl) { +long test_vfirst_m_b2(vbool2_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b4(vbool4_t vs2, size_t vl) { +long test_vfirst_m_b4(vbool4_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b8(vbool8_t vs2, size_t vl) { +long test_vfirst_m_b8(vbool8_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b16(vbool16_t vs2, size_t vl) { +long test_vfirst_m_b16(vbool16_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b32(vbool32_t vs2, size_t vl) { +long test_vfirst_m_b32(vbool32_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b64(vbool64_t vs2, size_t vl) { +long test_vfirst_m_b64(vbool64_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { +long test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { +long test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, 
size_t vl) { +long test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { +long test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { +long test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { +long test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { +long test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } diff --git a/auto-generated/llvm-overloaded-tests/vfmacc.c b/auto-generated/llvm-overloaded-tests/vfmacc.c index 45a5a0595..4940a63b2 100644 --- a/auto-generated/llvm-overloaded-tests/vfmacc.c +++ b/auto-generated/llvm-overloaded-tests/vfmacc.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfmadd.c b/auto-generated/llvm-overloaded-tests/vfmadd.c index 7654e9deb..5d6d7950b 100644 --- a/auto-generated/llvm-overloaded-tests/vfmadd.c +++ b/auto-generated/llvm-overloaded-tests/vfmadd.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfmax.c b/auto-generated/llvm-overloaded-tests/vfmax.c index e9640db4f..201526fd7 100644 --- a/auto-generated/llvm-overloaded-tests/vfmax.c +++ b/auto-generated/llvm-overloaded-tests/vfmax.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfmerge.c b/auto-generated/llvm-overloaded-tests/vfmerge.c index d56efe29e..9c3c75165 100644 --- a/auto-generated/llvm-overloaded-tests/vfmerge.c +++ b/auto-generated/llvm-overloaded-tests/vfmerge.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfmin.c b/auto-generated/llvm-overloaded-tests/vfmin.c index 71e2667e2..60858d911 100644 --- a/auto-generated/llvm-overloaded-tests/vfmin.c +++ b/auto-generated/llvm-overloaded-tests/vfmin.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature 
+zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfmsac.c b/auto-generated/llvm-overloaded-tests/vfmsac.c index a9d3ebc37..13a86a992 100644 --- a/auto-generated/llvm-overloaded-tests/vfmsac.c +++ b/auto-generated/llvm-overloaded-tests/vfmsac.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfmsub.c b/auto-generated/llvm-overloaded-tests/vfmsub.c index d72ec3437..95ca69593 100644 --- a/auto-generated/llvm-overloaded-tests/vfmsub.c +++ b/auto-generated/llvm-overloaded-tests/vfmsub.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfmul.c b/auto-generated/llvm-overloaded-tests/vfmul.c index e888bfb60..eba9a536f 100644 --- a/auto-generated/llvm-overloaded-tests/vfmul.c +++ b/auto-generated/llvm-overloaded-tests/vfmul.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfmv.c b/auto-generated/llvm-overloaded-tests/vfmv.c index 135004709..6d3a56d2d 100644 --- a/auto-generated/llvm-overloaded-tests/vfmv.c +++ b/auto-generated/llvm-overloaded-tests/vfmv.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfncvt.c b/auto-generated/llvm-overloaded-tests/vfncvt.c index 35ea978f6..3dc050ce1 100644 --- a/auto-generated/llvm-overloaded-tests/vfncvt.c +++ b/auto-generated/llvm-overloaded-tests/vfncvt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfncvt_rod.c b/auto-generated/llvm-overloaded-tests/vfncvt_rod.c index a3711225c..2952eb3fe 100644 --- a/auto-generated/llvm-overloaded-tests/vfncvt_rod.c +++ b/auto-generated/llvm-overloaded-tests/vfncvt_rod.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 
-target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfncvt_rtz.c b/auto-generated/llvm-overloaded-tests/vfncvt_rtz.c index 915b2adce..cf744436c 100644 --- a/auto-generated/llvm-overloaded-tests/vfncvt_rtz.c +++ b/auto-generated/llvm-overloaded-tests/vfncvt_rtz.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfneg.c b/auto-generated/llvm-overloaded-tests/vfneg.c index b52ab210e..de124620a 100644 --- a/auto-generated/llvm-overloaded-tests/vfneg.c +++ b/auto-generated/llvm-overloaded-tests/vfneg.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfnmacc.c b/auto-generated/llvm-overloaded-tests/vfnmacc.c index 7178799d2..72f022079 100644 --- a/auto-generated/llvm-overloaded-tests/vfnmacc.c +++ b/auto-generated/llvm-overloaded-tests/vfnmacc.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfnmadd.c b/auto-generated/llvm-overloaded-tests/vfnmadd.c index 877783403..80a7867c4 100644 --- a/auto-generated/llvm-overloaded-tests/vfnmadd.c +++ b/auto-generated/llvm-overloaded-tests/vfnmadd.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfnmsac.c b/auto-generated/llvm-overloaded-tests/vfnmsac.c index ddefdf376..41b00f277 100644 --- a/auto-generated/llvm-overloaded-tests/vfnmsac.c +++ b/auto-generated/llvm-overloaded-tests/vfnmsac.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfnmsub.c b/auto-generated/llvm-overloaded-tests/vfnmsub.c index 544253fad..dd5e84792 100644 --- a/auto-generated/llvm-overloaded-tests/vfnmsub.c +++ b/auto-generated/llvm-overloaded-tests/vfnmsub.c @@ -1,6 +1,6 @@ // REQUIRES: 
riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfrdiv.c b/auto-generated/llvm-overloaded-tests/vfrdiv.c index 5d9a182f3..1870ff88b 100644 --- a/auto-generated/llvm-overloaded-tests/vfrdiv.c +++ b/auto-generated/llvm-overloaded-tests/vfrdiv.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfrec7.c b/auto-generated/llvm-overloaded-tests/vfrec7.c index e92a5277e..1605f6ed5 100644 --- a/auto-generated/llvm-overloaded-tests/vfrec7.c +++ b/auto-generated/llvm-overloaded-tests/vfrec7.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfredmax.c b/auto-generated/llvm-overloaded-tests/vfredmax.c index c735c8ebd..13142afdd 100644 --- a/auto-generated/llvm-overloaded-tests/vfredmax.c +++ b/auto-generated/llvm-overloaded-tests/vfredmax.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfredmin.c b/auto-generated/llvm-overloaded-tests/vfredmin.c index 66cbbb40e..34d03f78f 100644 --- a/auto-generated/llvm-overloaded-tests/vfredmin.c +++ b/auto-generated/llvm-overloaded-tests/vfredmin.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfredosum.c b/auto-generated/llvm-overloaded-tests/vfredosum.c index d6153aadf..255cbf849 100644 --- a/auto-generated/llvm-overloaded-tests/vfredosum.c +++ b/auto-generated/llvm-overloaded-tests/vfredosum.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfredusum.c b/auto-generated/llvm-overloaded-tests/vfredusum.c index 5033c2df1..07470d588 100644 --- a/auto-generated/llvm-overloaded-tests/vfredusum.c +++ 
b/auto-generated/llvm-overloaded-tests/vfredusum.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfrsqrt7.c b/auto-generated/llvm-overloaded-tests/vfrsqrt7.c index dd3962142..6768629df 100644 --- a/auto-generated/llvm-overloaded-tests/vfrsqrt7.c +++ b/auto-generated/llvm-overloaded-tests/vfrsqrt7.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfrsub.c b/auto-generated/llvm-overloaded-tests/vfrsub.c index 61765630a..800dc6450 100644 --- a/auto-generated/llvm-overloaded-tests/vfrsub.c +++ b/auto-generated/llvm-overloaded-tests/vfrsub.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfsgnj.c b/auto-generated/llvm-overloaded-tests/vfsgnj.c index 7ed26f68a..a7bbcff28 100644 --- a/auto-generated/llvm-overloaded-tests/vfsgnj.c +++ b/auto-generated/llvm-overloaded-tests/vfsgnj.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfsgnjn.c b/auto-generated/llvm-overloaded-tests/vfsgnjn.c index 65d68509f..7d7af786e 100644 --- a/auto-generated/llvm-overloaded-tests/vfsgnjn.c +++ b/auto-generated/llvm-overloaded-tests/vfsgnjn.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfsgnjx.c b/auto-generated/llvm-overloaded-tests/vfsgnjx.c index bf97feddc..83660cd22 100644 --- a/auto-generated/llvm-overloaded-tests/vfsgnjx.c +++ b/auto-generated/llvm-overloaded-tests/vfsgnjx.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfslide1down.c b/auto-generated/llvm-overloaded-tests/vfslide1down.c index 2f4aeae73..47d050d49 
100644 --- a/auto-generated/llvm-overloaded-tests/vfslide1down.c +++ b/auto-generated/llvm-overloaded-tests/vfslide1down.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfslide1up.c b/auto-generated/llvm-overloaded-tests/vfslide1up.c index 909bbd433..c0f1cfacc 100644 --- a/auto-generated/llvm-overloaded-tests/vfslide1up.c +++ b/auto-generated/llvm-overloaded-tests/vfslide1up.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfsqrt.c b/auto-generated/llvm-overloaded-tests/vfsqrt.c index 26259bbd3..73d525ff0 100644 --- a/auto-generated/llvm-overloaded-tests/vfsqrt.c +++ b/auto-generated/llvm-overloaded-tests/vfsqrt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfsub.c b/auto-generated/llvm-overloaded-tests/vfsub.c index 813c0cba4..8528931f3 100644 --- a/auto-generated/llvm-overloaded-tests/vfsub.c +++ b/auto-generated/llvm-overloaded-tests/vfsub.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfwadd.c b/auto-generated/llvm-overloaded-tests/vfwadd.c index 341d12742..95f8f704a 100644 --- a/auto-generated/llvm-overloaded-tests/vfwadd.c +++ b/auto-generated/llvm-overloaded-tests/vfwadd.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfwcvt.c b/auto-generated/llvm-overloaded-tests/vfwcvt.c index 6261c8b80..8d3caf6c6 100644 --- a/auto-generated/llvm-overloaded-tests/vfwcvt.c +++ b/auto-generated/llvm-overloaded-tests/vfwcvt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfwcvt_rtz.c 
b/auto-generated/llvm-overloaded-tests/vfwcvt_rtz.c index 724772cbb..8418d8198 100644 --- a/auto-generated/llvm-overloaded-tests/vfwcvt_rtz.c +++ b/auto-generated/llvm-overloaded-tests/vfwcvt_rtz.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfwmacc.c b/auto-generated/llvm-overloaded-tests/vfwmacc.c index 59767265b..1a21fe6d4 100644 --- a/auto-generated/llvm-overloaded-tests/vfwmacc.c +++ b/auto-generated/llvm-overloaded-tests/vfwmacc.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfwmsac.c b/auto-generated/llvm-overloaded-tests/vfwmsac.c index 22f93687c..b10e205b6 100644 --- a/auto-generated/llvm-overloaded-tests/vfwmsac.c +++ b/auto-generated/llvm-overloaded-tests/vfwmsac.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfwmul.c b/auto-generated/llvm-overloaded-tests/vfwmul.c index 73a8daa59..e805d2502 100644 --- a/auto-generated/llvm-overloaded-tests/vfwmul.c +++ b/auto-generated/llvm-overloaded-tests/vfwmul.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfwnmacc.c b/auto-generated/llvm-overloaded-tests/vfwnmacc.c index 6bfd0daa4..e13627543 100644 --- a/auto-generated/llvm-overloaded-tests/vfwnmacc.c +++ b/auto-generated/llvm-overloaded-tests/vfwnmacc.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfwnmsac.c b/auto-generated/llvm-overloaded-tests/vfwnmsac.c index 9dcc36b38..f4a4f6dc2 100644 --- a/auto-generated/llvm-overloaded-tests/vfwnmsac.c +++ b/auto-generated/llvm-overloaded-tests/vfwnmsac.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck 
--check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfwredosum.c b/auto-generated/llvm-overloaded-tests/vfwredosum.c index a60d91752..ca145ab4f 100644 --- a/auto-generated/llvm-overloaded-tests/vfwredosum.c +++ b/auto-generated/llvm-overloaded-tests/vfwredosum.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfwredusum.c b/auto-generated/llvm-overloaded-tests/vfwredusum.c index e240301fe..0a6a05e62 100644 --- a/auto-generated/llvm-overloaded-tests/vfwredusum.c +++ b/auto-generated/llvm-overloaded-tests/vfwredusum.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vfwsub.c b/auto-generated/llvm-overloaded-tests/vfwsub.c index ad9c26a53..24905be84 100644 --- a/auto-generated/llvm-overloaded-tests/vfwsub.c +++ b/auto-generated/llvm-overloaded-tests/vfwsub.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vget.c b/auto-generated/llvm-overloaded-tests/vget.c index 39c7d99bc..b09d9cdb5 100644 --- a/auto-generated/llvm-overloaded-tests/vget.c +++ b/auto-generated/llvm-overloaded-tests/vget.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vle16.c b/auto-generated/llvm-overloaded-tests/vle16.c index 749ac3592..9d00e5441 100644 --- a/auto-generated/llvm-overloaded-tests/vle16.c +++ b/auto-generated/llvm-overloaded-tests/vle16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vle16ff.c b/auto-generated/llvm-overloaded-tests/vle16ff.c index 97d95ae5d..be90298f5 100644 --- a/auto-generated/llvm-overloaded-tests/vle16ff.c +++ b/auto-generated/llvm-overloaded-tests/vle16ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s 
-o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vle32.c b/auto-generated/llvm-overloaded-tests/vle32.c
index bcc9e979b..5d07eea5f 100644
--- a/auto-generated/llvm-overloaded-tests/vle32.c
+++ b/auto-generated/llvm-overloaded-tests/vle32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vle32ff.c b/auto-generated/llvm-overloaded-tests/vle32ff.c
index 5c27ffa5b..6bcc8618d 100644
--- a/auto-generated/llvm-overloaded-tests/vle32ff.c
+++ b/auto-generated/llvm-overloaded-tests/vle32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vle64.c b/auto-generated/llvm-overloaded-tests/vle64.c
index fb1a02d62..900898e7a 100644
--- a/auto-generated/llvm-overloaded-tests/vle64.c
+++ b/auto-generated/llvm-overloaded-tests/vle64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vle64ff.c b/auto-generated/llvm-overloaded-tests/vle64ff.c
index 695f54616..a4eaa25f0 100644
--- a/auto-generated/llvm-overloaded-tests/vle64ff.c
+++ b/auto-generated/llvm-overloaded-tests/vle64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vle8.c b/auto-generated/llvm-overloaded-tests/vle8.c
index e5f2c66b5..ea2f4f8e0 100644
--- a/auto-generated/llvm-overloaded-tests/vle8.c
+++ b/auto-generated/llvm-overloaded-tests/vle8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vle8ff.c b/auto-generated/llvm-overloaded-tests/vle8ff.c
index 86a953132..d0b7e9032 100644
--- a/auto-generated/llvm-overloaded-tests/vle8ff.c
+++ b/auto-generated/llvm-overloaded-tests/vle8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlmul_ext_v.c b/auto-generated/llvm-overloaded-tests/vlmul_ext_v.c
index ecee0f39f..65899d290 100644
--- a/auto-generated/llvm-overloaded-tests/vlmul_ext_v.c
+++ b/auto-generated/llvm-overloaded-tests/vlmul_ext_v.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlmul_trunc_v.c b/auto-generated/llvm-overloaded-tests/vlmul_trunc_v.c
index 519de65a8..2251e7a6d 100644
--- a/auto-generated/llvm-overloaded-tests/vlmul_trunc_v.c
+++ b/auto-generated/llvm-overloaded-tests/vlmul_trunc_v.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxei16.c b/auto-generated/llvm-overloaded-tests/vloxei16.c
index 1081d36ec..25482aaa8 100644
--- a/auto-generated/llvm-overloaded-tests/vloxei16.c
+++ b/auto-generated/llvm-overloaded-tests/vloxei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxei32.c b/auto-generated/llvm-overloaded-tests/vloxei32.c
index 39c688d8d..d3d930162 100644
--- a/auto-generated/llvm-overloaded-tests/vloxei32.c
+++ b/auto-generated/llvm-overloaded-tests/vloxei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxei64.c b/auto-generated/llvm-overloaded-tests/vloxei64.c
index 613d9e2fb..a695ebdb6 100644
--- a/auto-generated/llvm-overloaded-tests/vloxei64.c
+++ b/auto-generated/llvm-overloaded-tests/vloxei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxei8.c b/auto-generated/llvm-overloaded-tests/vloxei8.c
index e7d0e59f6..6606b8e7c 100644
--- a/auto-generated/llvm-overloaded-tests/vloxei8.c
+++ b/auto-generated/llvm-overloaded-tests/vloxei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg2ei16.c b/auto-generated/llvm-overloaded-tests/vloxseg2ei16.c
index 269cf114c..a3f4b6898 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg2ei16.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg2ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg2ei32.c b/auto-generated/llvm-overloaded-tests/vloxseg2ei32.c
index 3f81d1e2a..0f5977236 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg2ei32.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg2ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg2ei64.c b/auto-generated/llvm-overloaded-tests/vloxseg2ei64.c
index 0486dd996..fda124408 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg2ei64.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg2ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg2ei8.c b/auto-generated/llvm-overloaded-tests/vloxseg2ei8.c
index 1fc95c7b4..f812e7c4d 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg2ei8.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg2ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg3ei16.c b/auto-generated/llvm-overloaded-tests/vloxseg3ei16.c
index 0567fb628..e404894f4 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg3ei16.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg3ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg3ei32.c b/auto-generated/llvm-overloaded-tests/vloxseg3ei32.c
index 2b2bd958c..699c6ff47 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg3ei32.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg3ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg3ei64.c b/auto-generated/llvm-overloaded-tests/vloxseg3ei64.c
index 1bdd17d9f..886e2f7b5 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg3ei64.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg3ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg3ei8.c b/auto-generated/llvm-overloaded-tests/vloxseg3ei8.c
index 73138f3ea..9b148013b 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg3ei8.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg3ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg4ei16.c b/auto-generated/llvm-overloaded-tests/vloxseg4ei16.c
index 565458e86..cbbb01671 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg4ei16.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg4ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg4ei32.c b/auto-generated/llvm-overloaded-tests/vloxseg4ei32.c
index 4b0c40715..0264d7fd0 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg4ei32.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg4ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg4ei64.c b/auto-generated/llvm-overloaded-tests/vloxseg4ei64.c
index 9c12a3a6f..68b9cd4b0 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg4ei64.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg4ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg4ei8.c b/auto-generated/llvm-overloaded-tests/vloxseg4ei8.c
index 1a5bb47db..4ff062ba4 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg4ei8.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg4ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg5ei16.c b/auto-generated/llvm-overloaded-tests/vloxseg5ei16.c
index e091e5395..b8e44ad3d 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg5ei16.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg5ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg5ei32.c b/auto-generated/llvm-overloaded-tests/vloxseg5ei32.c
index b50071419..e25b0332f 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg5ei32.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg5ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg5ei64.c b/auto-generated/llvm-overloaded-tests/vloxseg5ei64.c
index a96044aa7..317bf4900 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg5ei64.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg5ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg5ei8.c b/auto-generated/llvm-overloaded-tests/vloxseg5ei8.c
index b6a05a497..05e93d174 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg5ei8.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg5ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg6ei16.c b/auto-generated/llvm-overloaded-tests/vloxseg6ei16.c
index 0adc9a46e..e494ea87b 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg6ei16.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg6ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg6ei32.c b/auto-generated/llvm-overloaded-tests/vloxseg6ei32.c
index cf319c152..6642bb7a6 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg6ei32.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg6ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg6ei64.c b/auto-generated/llvm-overloaded-tests/vloxseg6ei64.c
index d5754ba87..2e6b92e32 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg6ei64.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg6ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg6ei8.c b/auto-generated/llvm-overloaded-tests/vloxseg6ei8.c
index f6220dad8..1405ffc0a 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg6ei8.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg6ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg7ei16.c b/auto-generated/llvm-overloaded-tests/vloxseg7ei16.c
index 8c1812cec..bf398d50e 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg7ei16.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg7ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg7ei32.c b/auto-generated/llvm-overloaded-tests/vloxseg7ei32.c
index 2bfa93753..dcb2cd39b 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg7ei32.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg7ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg7ei64.c b/auto-generated/llvm-overloaded-tests/vloxseg7ei64.c
index 30dbe99c8..406e28477 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg7ei64.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg7ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg7ei8.c b/auto-generated/llvm-overloaded-tests/vloxseg7ei8.c
index dfc575450..1c5430f14 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg7ei8.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg7ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg8ei16.c b/auto-generated/llvm-overloaded-tests/vloxseg8ei16.c
index daf118310..94377c5cd 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg8ei16.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg8ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg8ei32.c b/auto-generated/llvm-overloaded-tests/vloxseg8ei32.c
index cb33c2dd9..19e08c9d7 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg8ei32.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg8ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg8ei64.c b/auto-generated/llvm-overloaded-tests/vloxseg8ei64.c
index 58710c1e7..36d2f8980 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg8ei64.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg8ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vloxseg8ei8.c b/auto-generated/llvm-overloaded-tests/vloxseg8ei8.c
index a32bb0321..64a994622 100644
--- a/auto-generated/llvm-overloaded-tests/vloxseg8ei8.c
+++ b/auto-generated/llvm-overloaded-tests/vloxseg8ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlse16.c b/auto-generated/llvm-overloaded-tests/vlse16.c
index 5f305b78e..b7a309d50 100644
--- a/auto-generated/llvm-overloaded-tests/vlse16.c
+++ b/auto-generated/llvm-overloaded-tests/vlse16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlse32.c b/auto-generated/llvm-overloaded-tests/vlse32.c
index 649ac2a10..0c8559e42 100644
--- a/auto-generated/llvm-overloaded-tests/vlse32.c
+++ b/auto-generated/llvm-overloaded-tests/vlse32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlse64.c b/auto-generated/llvm-overloaded-tests/vlse64.c
index fa52c7617..74a3a1b5f 100644
--- a/auto-generated/llvm-overloaded-tests/vlse64.c
+++ b/auto-generated/llvm-overloaded-tests/vlse64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg2e16.c b/auto-generated/llvm-overloaded-tests/vlseg2e16.c
index 08d33e72b..43123b4f2 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg2e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg2e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg2e16ff.c b/auto-generated/llvm-overloaded-tests/vlseg2e16ff.c
index dc734dda3..dbcdef126 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg2e16ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg2e16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg2e32.c b/auto-generated/llvm-overloaded-tests/vlseg2e32.c
index 550451551..3fad21873 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg2e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg2e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg2e32ff.c b/auto-generated/llvm-overloaded-tests/vlseg2e32ff.c
index 63bd16be5..51bca582d 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg2e32ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg2e32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg2e64.c b/auto-generated/llvm-overloaded-tests/vlseg2e64.c
index 820c36613..65ba4c615 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg2e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg2e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg2e64ff.c b/auto-generated/llvm-overloaded-tests/vlseg2e64ff.c
index 4ac04e34c..0605a6c70 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg2e64ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg2e64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg2e8ff.c b/auto-generated/llvm-overloaded-tests/vlseg2e8ff.c
index 77bcc473b..b459a944e 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg2e8ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg2e8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg3e16.c b/auto-generated/llvm-overloaded-tests/vlseg3e16.c
index 7623eeda1..0dac533e0 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg3e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg3e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg3e16ff.c b/auto-generated/llvm-overloaded-tests/vlseg3e16ff.c
index 1055cc103..ff00cca4e 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg3e16ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg3e16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg3e32.c b/auto-generated/llvm-overloaded-tests/vlseg3e32.c
index 7c9b9b7e8..46929a58a 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg3e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg3e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg3e32ff.c b/auto-generated/llvm-overloaded-tests/vlseg3e32ff.c
index 0c810add3..521c89276 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg3e32ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg3e32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg3e64.c b/auto-generated/llvm-overloaded-tests/vlseg3e64.c
index ce7f3e949..b7bea9f84 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg3e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg3e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg3e64ff.c b/auto-generated/llvm-overloaded-tests/vlseg3e64ff.c
index 3d1512f96..76d58e6ce 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg3e64ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg3e64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg3e8ff.c b/auto-generated/llvm-overloaded-tests/vlseg3e8ff.c
index 1d55cb143..d803c144d 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg3e8ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg3e8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg4e16.c b/auto-generated/llvm-overloaded-tests/vlseg4e16.c
index 18cfd8f25..b56d513df 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg4e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg4e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg4e16ff.c b/auto-generated/llvm-overloaded-tests/vlseg4e16ff.c
index 5245a07aa..5c5ecb284 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg4e16ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg4e16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg4e32.c b/auto-generated/llvm-overloaded-tests/vlseg4e32.c
index 680d9c27d..abca43619 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg4e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg4e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg4e32ff.c b/auto-generated/llvm-overloaded-tests/vlseg4e32ff.c
index 59ca02f08..2d60d68e2 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg4e32ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg4e32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg4e64.c b/auto-generated/llvm-overloaded-tests/vlseg4e64.c
index db96f64c5..36c73f025 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg4e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg4e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg4e64ff.c b/auto-generated/llvm-overloaded-tests/vlseg4e64ff.c
index 5d4d6a334..0a3e259c4 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg4e64ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg4e64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg4e8ff.c b/auto-generated/llvm-overloaded-tests/vlseg4e8ff.c
index aaaf44274..afe2a86c4 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg4e8ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg4e8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg5e16.c b/auto-generated/llvm-overloaded-tests/vlseg5e16.c
index db59693df..cf12ab878 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg5e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg5e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg5e16ff.c b/auto-generated/llvm-overloaded-tests/vlseg5e16ff.c
index eac27f077..0eb2db171 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg5e16ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg5e16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg5e32.c b/auto-generated/llvm-overloaded-tests/vlseg5e32.c
index 8a6edc9d9..ac6f6f7ac 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg5e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg5e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg5e32ff.c b/auto-generated/llvm-overloaded-tests/vlseg5e32ff.c
index 6a108e153..6611bd5e5 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg5e32ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg5e32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg5e64.c b/auto-generated/llvm-overloaded-tests/vlseg5e64.c
index 7585dbbd4..143bace06 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg5e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg5e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg5e64ff.c b/auto-generated/llvm-overloaded-tests/vlseg5e64ff.c
index b676743c8..f9d4b927a 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg5e64ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg5e64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg5e8ff.c b/auto-generated/llvm-overloaded-tests/vlseg5e8ff.c
index 2cb9d9558..02dd1893e 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg5e8ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg5e8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg6e16.c b/auto-generated/llvm-overloaded-tests/vlseg6e16.c
index c92c7156c..455974c84 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg6e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg6e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg6e16ff.c b/auto-generated/llvm-overloaded-tests/vlseg6e16ff.c
index d1b5df359..4d8a1967f 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg6e16ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg6e16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg6e32.c b/auto-generated/llvm-overloaded-tests/vlseg6e32.c
index 91e480a9a..61165e5c0 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg6e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg6e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg6e32ff.c b/auto-generated/llvm-overloaded-tests/vlseg6e32ff.c
index 7409919a1..2f8f442da 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg6e32ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg6e32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg6e64.c b/auto-generated/llvm-overloaded-tests/vlseg6e64.c
index 1784d858c..ea6cd972d 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg6e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg6e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg6e64ff.c b/auto-generated/llvm-overloaded-tests/vlseg6e64ff.c
index d60289dc6..1fe49ef31 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg6e64ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg6e64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg6e8ff.c b/auto-generated/llvm-overloaded-tests/vlseg6e8ff.c
index 6551b8831..4b507acfc 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg6e8ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg6e8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg7e16.c b/auto-generated/llvm-overloaded-tests/vlseg7e16.c
index 90ed1604f..f24c74d6a 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg7e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg7e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg7e16ff.c b/auto-generated/llvm-overloaded-tests/vlseg7e16ff.c
index 0d184ce4e..f6181cfc8 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg7e16ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg7e16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg7e32.c b/auto-generated/llvm-overloaded-tests/vlseg7e32.c
index 2f49b46e1..7f98184fd 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg7e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg7e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg7e32ff.c b/auto-generated/llvm-overloaded-tests/vlseg7e32ff.c
index 8bcc421de..dcc55e78d 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg7e32ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg7e32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg7e64.c b/auto-generated/llvm-overloaded-tests/vlseg7e64.c
index 7f47d8751..1b94baa05 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg7e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg7e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg7e64ff.c b/auto-generated/llvm-overloaded-tests/vlseg7e64ff.c
index b137dea51..4bd127e77 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg7e64ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg7e64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg7e8ff.c b/auto-generated/llvm-overloaded-tests/vlseg7e8ff.c
index 6ea930980..a348746c2 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg7e8ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg7e8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg8e16.c b/auto-generated/llvm-overloaded-tests/vlseg8e16.c
index c495ac5fa..b59d41c2d 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg8e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg8e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg8e16ff.c b/auto-generated/llvm-overloaded-tests/vlseg8e16ff.c
index e3a3021f2..59b0d1d44 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg8e16ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg8e16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg8e32.c b/auto-generated/llvm-overloaded-tests/vlseg8e32.c
index 259117dbc..2c2395d8d 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg8e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg8e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg8e32ff.c b/auto-generated/llvm-overloaded-tests/vlseg8e32ff.c
index af4cf6605..d21fe3ad9 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg8e32ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg8e32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg8e64.c b/auto-generated/llvm-overloaded-tests/vlseg8e64.c
index 7a4029725..c0531927d 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg8e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg8e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg8e64ff.c b/auto-generated/llvm-overloaded-tests/vlseg8e64ff.c
index 53623df1e..0726b8a57 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg8e64ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg8e64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlseg8e8ff.c b/auto-generated/llvm-overloaded-tests/vlseg8e8ff.c
index 1a1b6f5c1..544085569 100644
--- a/auto-generated/llvm-overloaded-tests/vlseg8e8ff.c
+++ b/auto-generated/llvm-overloaded-tests/vlseg8e8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg2e16.c b/auto-generated/llvm-overloaded-tests/vlsseg2e16.c
index 480ec7aaa..c98966d4e 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg2e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg2e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg2e32.c b/auto-generated/llvm-overloaded-tests/vlsseg2e32.c
index e8081e9aa..a175490f9 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg2e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg2e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg2e64.c b/auto-generated/llvm-overloaded-tests/vlsseg2e64.c
index ed62c1a50..626ec658f 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg2e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg2e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg3e16.c b/auto-generated/llvm-overloaded-tests/vlsseg3e16.c
index 267d870f7..3132ba71e 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg3e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg3e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg3e32.c b/auto-generated/llvm-overloaded-tests/vlsseg3e32.c
index b48a51834..8104bddb5 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg3e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg3e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg3e64.c b/auto-generated/llvm-overloaded-tests/vlsseg3e64.c
index ce39382ce..ebeccd31f 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg3e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg3e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg4e16.c b/auto-generated/llvm-overloaded-tests/vlsseg4e16.c
index c39a10cea..1bd9ebd9e 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg4e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg4e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg4e32.c b/auto-generated/llvm-overloaded-tests/vlsseg4e32.c
index 796fb6633..a1591a990 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg4e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg4e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg4e64.c b/auto-generated/llvm-overloaded-tests/vlsseg4e64.c
index d45a5087e..e1bc84938 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg4e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg4e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg5e16.c b/auto-generated/llvm-overloaded-tests/vlsseg5e16.c
index d987c30f9..1b94b0253 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg5e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg5e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg5e32.c b/auto-generated/llvm-overloaded-tests/vlsseg5e32.c
index bf2fb2111..35a2d1dfc 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg5e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg5e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg5e64.c b/auto-generated/llvm-overloaded-tests/vlsseg5e64.c
index eea6e955c..ae51a236b 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg5e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg5e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg6e16.c b/auto-generated/llvm-overloaded-tests/vlsseg6e16.c
index 69707f49d..ce9275b52 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg6e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg6e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg6e32.c b/auto-generated/llvm-overloaded-tests/vlsseg6e32.c
index ee56b0c69..f64adb52e 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg6e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg6e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg6e64.c b/auto-generated/llvm-overloaded-tests/vlsseg6e64.c
index f45817a6f..1c70f0ea8 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg6e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg6e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg7e16.c b/auto-generated/llvm-overloaded-tests/vlsseg7e16.c
index a9bbd8deb..78de01593 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg7e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg7e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg7e32.c b/auto-generated/llvm-overloaded-tests/vlsseg7e32.c
index ce9f7b793..2ccf948de 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg7e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg7e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg7e64.c b/auto-generated/llvm-overloaded-tests/vlsseg7e64.c
index 2fcaa0d32..0e5fbb332 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg7e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg7e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg8e16.c b/auto-generated/llvm-overloaded-tests/vlsseg8e16.c
index e53bef3e4..e21eb3972 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg8e16.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg8e16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg8e32.c b/auto-generated/llvm-overloaded-tests/vlsseg8e32.c
index 3e1747877..dc26be3ff 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg8e32.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg8e32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vlsseg8e64.c b/auto-generated/llvm-overloaded-tests/vlsseg8e64.c
index ffccd481d..5a5dcbfde 100644
--- a/auto-generated/llvm-overloaded-tests/vlsseg8e64.c
+++ b/auto-generated/llvm-overloaded-tests/vlsseg8e64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vluxei16.c b/auto-generated/llvm-overloaded-tests/vluxei16.c
index d1f5eb350..ef2e6c7ef 100644
--- a/auto-generated/llvm-overloaded-tests/vluxei16.c
+++ b/auto-generated/llvm-overloaded-tests/vluxei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vluxei32.c b/auto-generated/llvm-overloaded-tests/vluxei32.c
index 26bb8000a..892232733 100644
--- a/auto-generated/llvm-overloaded-tests/vluxei32.c
+++ b/auto-generated/llvm-overloaded-tests/vluxei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vluxei64.c b/auto-generated/llvm-overloaded-tests/vluxei64.c
index 6b2444226..9ea420314 100644
--- a/auto-generated/llvm-overloaded-tests/vluxei64.c
+++ b/auto-generated/llvm-overloaded-tests/vluxei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vluxei8.c b/auto-generated/llvm-overloaded-tests/vluxei8.c
index 37bd04886..86e9b665c 100644
--- a/auto-generated/llvm-overloaded-tests/vluxei8.c
+++ b/auto-generated/llvm-overloaded-tests/vluxei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vluxseg2ei16.c b/auto-generated/llvm-overloaded-tests/vluxseg2ei16.c
index 3d4105e8f..25c094c8c 100644
--- a/auto-generated/llvm-overloaded-tests/vluxseg2ei16.c
+++ b/auto-generated/llvm-overloaded-tests/vluxseg2ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vluxseg2ei32.c b/auto-generated/llvm-overloaded-tests/vluxseg2ei32.c
index 1450f1853..80b37499d 100644
--- a/auto-generated/llvm-overloaded-tests/vluxseg2ei32.c
+++ b/auto-generated/llvm-overloaded-tests/vluxseg2ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vluxseg2ei64.c b/auto-generated/llvm-overloaded-tests/vluxseg2ei64.c
index 973845b72..d7cb8d1be 100644
--- a/auto-generated/llvm-overloaded-tests/vluxseg2ei64.c
+++ b/auto-generated/llvm-overloaded-tests/vluxseg2ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vluxseg2ei8.c b/auto-generated/llvm-overloaded-tests/vluxseg2ei8.c
index 412eb2629..7ad2fe17a 100644
--- a/auto-generated/llvm-overloaded-tests/vluxseg2ei8.c
+++ b/auto-generated/llvm-overloaded-tests/vluxseg2ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
diff --git a/auto-generated/llvm-overloaded-tests/vluxseg3ei16.c b/auto-generated/llvm-overloaded-tests/vluxseg3ei16.c
index
ed396d770..91921f422 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg3ei16.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg3ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg3ei32.c b/auto-generated/llvm-overloaded-tests/vluxseg3ei32.c index e8da081a4..ba7e54483 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg3ei32.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg3ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg3ei64.c b/auto-generated/llvm-overloaded-tests/vluxseg3ei64.c index 4efeaa592..c130f3146 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg3ei64.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg3ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg3ei8.c b/auto-generated/llvm-overloaded-tests/vluxseg3ei8.c index 5da504730..76b95b448 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg3ei8.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg3ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg4ei16.c b/auto-generated/llvm-overloaded-tests/vluxseg4ei16.c index fb79d6588..5bf294cfa 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg4ei16.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg4ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg4ei32.c b/auto-generated/llvm-overloaded-tests/vluxseg4ei32.c index 2c1b1d84f..16af2fa9c 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg4ei32.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg4ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // 
RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg4ei64.c b/auto-generated/llvm-overloaded-tests/vluxseg4ei64.c index 5d0022ce6..c6a7eba92 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg4ei64.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg4ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg4ei8.c b/auto-generated/llvm-overloaded-tests/vluxseg4ei8.c index 142be874e..9f8aeb0cd 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg4ei8.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg4ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg5ei16.c b/auto-generated/llvm-overloaded-tests/vluxseg5ei16.c index 346bb6983..7b9845c2d 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg5ei16.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg5ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg5ei32.c b/auto-generated/llvm-overloaded-tests/vluxseg5ei32.c index 381855531..c1a79dc4f 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg5ei32.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg5ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg5ei64.c b/auto-generated/llvm-overloaded-tests/vluxseg5ei64.c index 49924b654..8eaad23f9 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg5ei64.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg5ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg5ei8.c b/auto-generated/llvm-overloaded-tests/vluxseg5ei8.c index 944ec0c73..72ca29b49 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg5ei8.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg5ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: 
-target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg6ei16.c b/auto-generated/llvm-overloaded-tests/vluxseg6ei16.c index 4582e9ba2..710fe0197 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg6ei16.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg6ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg6ei32.c b/auto-generated/llvm-overloaded-tests/vluxseg6ei32.c index 052e6ca7d..df279b4c3 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg6ei32.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg6ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg6ei64.c b/auto-generated/llvm-overloaded-tests/vluxseg6ei64.c index 1a4c10d0c..75c47941a 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg6ei64.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg6ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg6ei8.c b/auto-generated/llvm-overloaded-tests/vluxseg6ei8.c index a8f6d44ca..10446a70a 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg6ei8.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg6ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg7ei16.c b/auto-generated/llvm-overloaded-tests/vluxseg7ei16.c index 09303d07e..251e1bcc5 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg7ei16.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg7ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg7ei32.c b/auto-generated/llvm-overloaded-tests/vluxseg7ei32.c index 9a0b3dd30..41a17c04a 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg7ei32.c +++ 
b/auto-generated/llvm-overloaded-tests/vluxseg7ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg7ei64.c b/auto-generated/llvm-overloaded-tests/vluxseg7ei64.c index 4c9898411..b4badc8a3 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg7ei64.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg7ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg7ei8.c b/auto-generated/llvm-overloaded-tests/vluxseg7ei8.c index 17d9da73d..18ed76397 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg7ei8.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg7ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg8ei16.c b/auto-generated/llvm-overloaded-tests/vluxseg8ei16.c index b61c5f247..e9424f533 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg8ei16.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg8ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg8ei32.c b/auto-generated/llvm-overloaded-tests/vluxseg8ei32.c index 7041719e5..79a13a75b 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg8ei32.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg8ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vluxseg8ei64.c b/auto-generated/llvm-overloaded-tests/vluxseg8ei64.c index 5e5a19830..5a645c41c 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg8ei64.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg8ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git 
a/auto-generated/llvm-overloaded-tests/vluxseg8ei8.c b/auto-generated/llvm-overloaded-tests/vluxseg8ei8.c index e7658139c..a3734f5b6 100644 --- a/auto-generated/llvm-overloaded-tests/vluxseg8ei8.c +++ b/auto-generated/llvm-overloaded-tests/vluxseg8ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmacc.c b/auto-generated/llvm-overloaded-tests/vmacc.c index 01c72cba5..42fc710a9 100644 --- a/auto-generated/llvm-overloaded-tests/vmacc.c +++ b/auto-generated/llvm-overloaded-tests/vmacc.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmadd.c b/auto-generated/llvm-overloaded-tests/vmadd.c index c046afb41..189fc2d0e 100644 --- a/auto-generated/llvm-overloaded-tests/vmadd.c +++ b/auto-generated/llvm-overloaded-tests/vmadd.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmerge.c b/auto-generated/llvm-overloaded-tests/vmerge.c index b97674761..badd42e62 100644 --- a/auto-generated/llvm-overloaded-tests/vmerge.c +++ b/auto-generated/llvm-overloaded-tests/vmerge.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmfeq.c b/auto-generated/llvm-overloaded-tests/vmfeq.c index 56b8c60c8..da2cd8c16 100644 --- a/auto-generated/llvm-overloaded-tests/vmfeq.c +++ b/auto-generated/llvm-overloaded-tests/vmfeq.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmfge.c b/auto-generated/llvm-overloaded-tests/vmfge.c index 6527dbd1d..92ca4dd06 100644 --- a/auto-generated/llvm-overloaded-tests/vmfge.c +++ b/auto-generated/llvm-overloaded-tests/vmfge.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck 
--check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmfgt.c b/auto-generated/llvm-overloaded-tests/vmfgt.c index 3ac8d22fa..952021cb2 100644 --- a/auto-generated/llvm-overloaded-tests/vmfgt.c +++ b/auto-generated/llvm-overloaded-tests/vmfgt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmfle.c b/auto-generated/llvm-overloaded-tests/vmfle.c index 489b0fdcc..8e53ffafc 100644 --- a/auto-generated/llvm-overloaded-tests/vmfle.c +++ b/auto-generated/llvm-overloaded-tests/vmfle.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmflt.c b/auto-generated/llvm-overloaded-tests/vmflt.c index b1712ce2b..97eb8937c 100644 --- a/auto-generated/llvm-overloaded-tests/vmflt.c +++ b/auto-generated/llvm-overloaded-tests/vmflt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmfne.c b/auto-generated/llvm-overloaded-tests/vmfne.c index df921da2e..f465c2593 100644 --- a/auto-generated/llvm-overloaded-tests/vmfne.c +++ b/auto-generated/llvm-overloaded-tests/vmfne.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmmv.c b/auto-generated/llvm-overloaded-tests/vmmv.c index 65832b567..f7de0202f 100644 --- a/auto-generated/llvm-overloaded-tests/vmmv.c +++ b/auto-generated/llvm-overloaded-tests/vmmv.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmseq.c b/auto-generated/llvm-overloaded-tests/vmseq.c index fecfed872..fb4ed06db 100644 --- a/auto-generated/llvm-overloaded-tests/vmseq.c +++ b/auto-generated/llvm-overloaded-tests/vmseq.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck 
--check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmsge.c b/auto-generated/llvm-overloaded-tests/vmsge.c index c70c8efbe..09e8d0def 100644 --- a/auto-generated/llvm-overloaded-tests/vmsge.c +++ b/auto-generated/llvm-overloaded-tests/vmsge.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmsgeu.c b/auto-generated/llvm-overloaded-tests/vmsgeu.c index 82fc1ceb7..8ca98dcc4 100644 --- a/auto-generated/llvm-overloaded-tests/vmsgeu.c +++ b/auto-generated/llvm-overloaded-tests/vmsgeu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmsgt.c b/auto-generated/llvm-overloaded-tests/vmsgt.c index 72fe47564..df04a5454 100644 --- a/auto-generated/llvm-overloaded-tests/vmsgt.c +++ b/auto-generated/llvm-overloaded-tests/vmsgt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmsgtu.c b/auto-generated/llvm-overloaded-tests/vmsgtu.c index d0a01b291..2377bb157 100644 --- a/auto-generated/llvm-overloaded-tests/vmsgtu.c +++ b/auto-generated/llvm-overloaded-tests/vmsgtu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmsle.c b/auto-generated/llvm-overloaded-tests/vmsle.c index 8b2b35c25..b65691e19 100644 --- a/auto-generated/llvm-overloaded-tests/vmsle.c +++ b/auto-generated/llvm-overloaded-tests/vmsle.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmsleu.c b/auto-generated/llvm-overloaded-tests/vmsleu.c index 426d8e66d..e6a1058c6 100644 --- a/auto-generated/llvm-overloaded-tests/vmsleu.c +++ b/auto-generated/llvm-overloaded-tests/vmsleu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // 
RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmslt.c b/auto-generated/llvm-overloaded-tests/vmslt.c index 86d1df4d3..45618f3ff 100644 --- a/auto-generated/llvm-overloaded-tests/vmslt.c +++ b/auto-generated/llvm-overloaded-tests/vmslt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmsltu.c b/auto-generated/llvm-overloaded-tests/vmsltu.c index 9c64d831a..f275dd3f1 100644 --- a/auto-generated/llvm-overloaded-tests/vmsltu.c +++ b/auto-generated/llvm-overloaded-tests/vmsltu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmsne.c b/auto-generated/llvm-overloaded-tests/vmsne.c index 5e85996e3..f0e5507bc 100644 --- a/auto-generated/llvm-overloaded-tests/vmsne.c +++ b/auto-generated/llvm-overloaded-tests/vmsne.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vmv.c b/auto-generated/llvm-overloaded-tests/vmv.c index 4ce114b1f..338dfbe63 100644 --- a/auto-generated/llvm-overloaded-tests/vmv.c +++ b/auto-generated/llvm-overloaded-tests/vmv.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vneg.c b/auto-generated/llvm-overloaded-tests/vneg.c index 15a05dc21..cd484c7b7 100644 --- a/auto-generated/llvm-overloaded-tests/vneg.c +++ b/auto-generated/llvm-overloaded-tests/vneg.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vnmsac.c b/auto-generated/llvm-overloaded-tests/vnmsac.c index 8caf88ee3..100ca6ba3 100644 --- a/auto-generated/llvm-overloaded-tests/vnmsac.c +++ b/auto-generated/llvm-overloaded-tests/vnmsac.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // 
RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vnmsub.c b/auto-generated/llvm-overloaded-tests/vnmsub.c index 26b6b4356..1b620b960 100644 --- a/auto-generated/llvm-overloaded-tests/vnmsub.c +++ b/auto-generated/llvm-overloaded-tests/vnmsub.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vreinterpret.c b/auto-generated/llvm-overloaded-tests/vreinterpret.c index 6e1139852..28508e4ee 100644 --- a/auto-generated/llvm-overloaded-tests/vreinterpret.c +++ b/auto-generated/llvm-overloaded-tests/vreinterpret.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vrgather.c b/auto-generated/llvm-overloaded-tests/vrgather.c index 204aa45e3..adc9e774f 100644 --- a/auto-generated/llvm-overloaded-tests/vrgather.c +++ b/auto-generated/llvm-overloaded-tests/vrgather.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vrgatherei16.c b/auto-generated/llvm-overloaded-tests/vrgatherei16.c index 82f5c81cf..1ff1f2ce2 100644 --- a/auto-generated/llvm-overloaded-tests/vrgatherei16.c +++ b/auto-generated/llvm-overloaded-tests/vrgatherei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vse16.c b/auto-generated/llvm-overloaded-tests/vse16.c index 78bf1da3a..d13ce1acd 100644 --- a/auto-generated/llvm-overloaded-tests/vse16.c +++ b/auto-generated/llvm-overloaded-tests/vse16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vse32.c b/auto-generated/llvm-overloaded-tests/vse32.c index cff822b03..05c2817e8 100644 --- a/auto-generated/llvm-overloaded-tests/vse32.c +++ b/auto-generated/llvm-overloaded-tests/vse32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh 
-disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vse64.c b/auto-generated/llvm-overloaded-tests/vse64.c index 9e642087c..1c5352f6f 100644 --- a/auto-generated/llvm-overloaded-tests/vse64.c +++ b/auto-generated/llvm-overloaded-tests/vse64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vset.c b/auto-generated/llvm-overloaded-tests/vset.c index 31f8a26ee..f9f89f98f 100644 --- a/auto-generated/llvm-overloaded-tests/vset.c +++ b/auto-generated/llvm-overloaded-tests/vset.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vslidedown.c b/auto-generated/llvm-overloaded-tests/vslidedown.c index 94553793c..6b1fca8f9 100644 --- a/auto-generated/llvm-overloaded-tests/vslidedown.c +++ b/auto-generated/llvm-overloaded-tests/vslidedown.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vslideup.c b/auto-generated/llvm-overloaded-tests/vslideup.c index 62db73b80..cad4d9a1d 100644 --- a/auto-generated/llvm-overloaded-tests/vslideup.c +++ b/auto-generated/llvm-overloaded-tests/vslideup.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxei16.c b/auto-generated/llvm-overloaded-tests/vsoxei16.c index 9f09dfa95..4876f9fd0 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxei16.c +++ b/auto-generated/llvm-overloaded-tests/vsoxei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxei32.c b/auto-generated/llvm-overloaded-tests/vsoxei32.c index 3866e2bc5..5ddd1c98e 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxei32.c +++ b/auto-generated/llvm-overloaded-tests/vsoxei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature 
+experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxei64.c b/auto-generated/llvm-overloaded-tests/vsoxei64.c index 4bba2f0d8..0c004590c 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxei64.c +++ b/auto-generated/llvm-overloaded-tests/vsoxei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxei8.c b/auto-generated/llvm-overloaded-tests/vsoxei8.c index 74580c22b..234b660c2 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxei8.c +++ b/auto-generated/llvm-overloaded-tests/vsoxei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg2ei16.c b/auto-generated/llvm-overloaded-tests/vsoxseg2ei16.c index 42af3003a..25f049a04 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg2ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg2ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg2ei32.c b/auto-generated/llvm-overloaded-tests/vsoxseg2ei32.c index 7f37a507e..1bd36e728 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg2ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg2ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg2ei64.c b/auto-generated/llvm-overloaded-tests/vsoxseg2ei64.c index 5412ce0b6..debd8d5a6 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg2ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg2ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg2ei8.c b/auto-generated/llvm-overloaded-tests/vsoxseg2ei8.c index 6013758b8..d1879dbf5 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg2ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg2ei8.c @@ -1,6 +1,6 @@ // 
REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg3ei16.c b/auto-generated/llvm-overloaded-tests/vsoxseg3ei16.c index 4c23fa58b..ac68cd348 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg3ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg3ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg3ei32.c b/auto-generated/llvm-overloaded-tests/vsoxseg3ei32.c index 5cec2c83f..b1d6d25d9 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg3ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg3ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg3ei64.c b/auto-generated/llvm-overloaded-tests/vsoxseg3ei64.c index bfd5535cb..c101dbb86 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg3ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg3ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg3ei8.c b/auto-generated/llvm-overloaded-tests/vsoxseg3ei8.c index 7fbd644ea..416c27036 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg3ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg3ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg4ei16.c b/auto-generated/llvm-overloaded-tests/vsoxseg4ei16.c index c1ea561b7..625d3a86a 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg4ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg4ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg4ei32.c b/auto-generated/llvm-overloaded-tests/vsoxseg4ei32.c 
index abb861004..b888f4602 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg4ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg4ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg4ei64.c b/auto-generated/llvm-overloaded-tests/vsoxseg4ei64.c index 43760bdd1..3baaa140f 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg4ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg4ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg4ei8.c b/auto-generated/llvm-overloaded-tests/vsoxseg4ei8.c index c608823e1..b1a9ca162 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg4ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg4ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg5ei16.c b/auto-generated/llvm-overloaded-tests/vsoxseg5ei16.c index 1dde607a8..eddf62310 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg5ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg5ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg5ei32.c b/auto-generated/llvm-overloaded-tests/vsoxseg5ei32.c index a5011ba4a..16990807e 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg5ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg5ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg5ei64.c b/auto-generated/llvm-overloaded-tests/vsoxseg5ei64.c index 90d810b61..3d3a1f0dc 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg5ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg5ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | 
\ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg5ei8.c b/auto-generated/llvm-overloaded-tests/vsoxseg5ei8.c index 741819e1d..d45a0124e 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg5ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg5ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg6ei16.c b/auto-generated/llvm-overloaded-tests/vsoxseg6ei16.c index e3238a455..a13c73b67 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg6ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg6ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg6ei32.c b/auto-generated/llvm-overloaded-tests/vsoxseg6ei32.c index 9c29435bd..ede0dcdcc 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg6ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg6ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg6ei64.c b/auto-generated/llvm-overloaded-tests/vsoxseg6ei64.c index e7d194800..aa069e675 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg6ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg6ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg6ei8.c b/auto-generated/llvm-overloaded-tests/vsoxseg6ei8.c index 5c11df866..ca9e72b46 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg6ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg6ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg7ei16.c b/auto-generated/llvm-overloaded-tests/vsoxseg7ei16.c index f3cb2089c..a9c01330e 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg7ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg7ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: 
-target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg7ei32.c b/auto-generated/llvm-overloaded-tests/vsoxseg7ei32.c index f69cf20c4..8f60967ec 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg7ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg7ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg7ei64.c b/auto-generated/llvm-overloaded-tests/vsoxseg7ei64.c index b8bcd92d3..160de6109 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg7ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg7ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg7ei8.c b/auto-generated/llvm-overloaded-tests/vsoxseg7ei8.c index c7b8d3388..ddb4426d5 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg7ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg7ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg8ei16.c b/auto-generated/llvm-overloaded-tests/vsoxseg8ei16.c index 0e3e8cedb..7c9249261 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg8ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg8ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg8ei32.c b/auto-generated/llvm-overloaded-tests/vsoxseg8ei32.c index d9a1fe42e..380022c04 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg8ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg8ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg8ei64.c b/auto-generated/llvm-overloaded-tests/vsoxseg8ei64.c index 08190b607..3cbdb37bd 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg8ei64.c +++ 
b/auto-generated/llvm-overloaded-tests/vsoxseg8ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsoxseg8ei8.c b/auto-generated/llvm-overloaded-tests/vsoxseg8ei8.c index 3c9fdff15..0b224ff71 100644 --- a/auto-generated/llvm-overloaded-tests/vsoxseg8ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsoxseg8ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsse16.c b/auto-generated/llvm-overloaded-tests/vsse16.c index 1cf57e1d5..7f44dc849 100644 --- a/auto-generated/llvm-overloaded-tests/vsse16.c +++ b/auto-generated/llvm-overloaded-tests/vsse16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsse32.c b/auto-generated/llvm-overloaded-tests/vsse32.c index 2475911e3..baeae97ed 100644 --- a/auto-generated/llvm-overloaded-tests/vsse32.c +++ b/auto-generated/llvm-overloaded-tests/vsse32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsse64.c b/auto-generated/llvm-overloaded-tests/vsse64.c index b6aa913f8..e4afe3346 100644 --- a/auto-generated/llvm-overloaded-tests/vsse64.c +++ b/auto-generated/llvm-overloaded-tests/vsse64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg2e16.c b/auto-generated/llvm-overloaded-tests/vsseg2e16.c index 43238cab1..415a55b37 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg2e16.c +++ b/auto-generated/llvm-overloaded-tests/vsseg2e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg2e32.c b/auto-generated/llvm-overloaded-tests/vsseg2e32.c index 
5e216dc46..036f11cce 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg2e32.c +++ b/auto-generated/llvm-overloaded-tests/vsseg2e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg2e64.c b/auto-generated/llvm-overloaded-tests/vsseg2e64.c index dcb9ed1f1..35847f988 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg2e64.c +++ b/auto-generated/llvm-overloaded-tests/vsseg2e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg3e16.c b/auto-generated/llvm-overloaded-tests/vsseg3e16.c index 25e0acd41..70e03adb6 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg3e16.c +++ b/auto-generated/llvm-overloaded-tests/vsseg3e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg3e32.c b/auto-generated/llvm-overloaded-tests/vsseg3e32.c index e1dae8c47..f1f6fcc54 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg3e32.c +++ b/auto-generated/llvm-overloaded-tests/vsseg3e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg3e64.c b/auto-generated/llvm-overloaded-tests/vsseg3e64.c index d48c6dc50..a3f33beee 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg3e64.c +++ b/auto-generated/llvm-overloaded-tests/vsseg3e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg4e16.c b/auto-generated/llvm-overloaded-tests/vsseg4e16.c index 6cbaae422..06ec69ba2 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg4e16.c +++ b/auto-generated/llvm-overloaded-tests/vsseg4e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git 
a/auto-generated/llvm-overloaded-tests/vsseg4e32.c b/auto-generated/llvm-overloaded-tests/vsseg4e32.c index f56835928..fae06b3ee 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg4e32.c +++ b/auto-generated/llvm-overloaded-tests/vsseg4e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg4e64.c b/auto-generated/llvm-overloaded-tests/vsseg4e64.c index 57f43fc22..3dd378731 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg4e64.c +++ b/auto-generated/llvm-overloaded-tests/vsseg4e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg5e16.c b/auto-generated/llvm-overloaded-tests/vsseg5e16.c index cf3ad5532..f3e196ba4 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg5e16.c +++ b/auto-generated/llvm-overloaded-tests/vsseg5e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg5e32.c b/auto-generated/llvm-overloaded-tests/vsseg5e32.c index 956d08e51..1bf384edd 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg5e32.c +++ b/auto-generated/llvm-overloaded-tests/vsseg5e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg5e64.c b/auto-generated/llvm-overloaded-tests/vsseg5e64.c index 2114c8e20..a3a385864 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg5e64.c +++ b/auto-generated/llvm-overloaded-tests/vsseg5e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg6e16.c b/auto-generated/llvm-overloaded-tests/vsseg6e16.c index 6fcd61811..6d42bc4d3 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg6e16.c +++ b/auto-generated/llvm-overloaded-tests/vsseg6e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: 
-emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg6e32.c b/auto-generated/llvm-overloaded-tests/vsseg6e32.c index e6cd0c6a0..ae33fc5de 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg6e32.c +++ b/auto-generated/llvm-overloaded-tests/vsseg6e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg6e64.c b/auto-generated/llvm-overloaded-tests/vsseg6e64.c index aedfabe92..b54daa66a 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg6e64.c +++ b/auto-generated/llvm-overloaded-tests/vsseg6e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg7e16.c b/auto-generated/llvm-overloaded-tests/vsseg7e16.c index ff1f2bed7..ab8bcdc64 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg7e16.c +++ b/auto-generated/llvm-overloaded-tests/vsseg7e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg7e32.c b/auto-generated/llvm-overloaded-tests/vsseg7e32.c index 4de310bad..f618e5326 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg7e32.c +++ b/auto-generated/llvm-overloaded-tests/vsseg7e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg7e64.c b/auto-generated/llvm-overloaded-tests/vsseg7e64.c index 5f3266a8c..195410936 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg7e64.c +++ b/auto-generated/llvm-overloaded-tests/vsseg7e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg8e16.c b/auto-generated/llvm-overloaded-tests/vsseg8e16.c index a41ca55e4..127b78cd1 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg8e16.c +++ b/auto-generated/llvm-overloaded-tests/vsseg8e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature 
+experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg8e32.c b/auto-generated/llvm-overloaded-tests/vsseg8e32.c index 19cf5df8d..ae05e8039 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg8e32.c +++ b/auto-generated/llvm-overloaded-tests/vsseg8e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsseg8e64.c b/auto-generated/llvm-overloaded-tests/vsseg8e64.c index e4bdb7bd6..0380df56d 100644 --- a/auto-generated/llvm-overloaded-tests/vsseg8e64.c +++ b/auto-generated/llvm-overloaded-tests/vsseg8e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg2e16.c b/auto-generated/llvm-overloaded-tests/vssseg2e16.c index e3c10da0d..5b2ccfb87 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg2e16.c +++ b/auto-generated/llvm-overloaded-tests/vssseg2e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg2e32.c b/auto-generated/llvm-overloaded-tests/vssseg2e32.c index 41678e400..3e66b0c61 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg2e32.c +++ b/auto-generated/llvm-overloaded-tests/vssseg2e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg2e64.c b/auto-generated/llvm-overloaded-tests/vssseg2e64.c index 74096a104..03735b1cf 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg2e64.c +++ b/auto-generated/llvm-overloaded-tests/vssseg2e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg3e16.c b/auto-generated/llvm-overloaded-tests/vssseg3e16.c index b8efd3374..0a5e34722 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg3e16.c +++ b/auto-generated/llvm-overloaded-tests/vssseg3e16.c @@ -1,6 +1,6 @@ // REQUIRES: 
riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg3e32.c b/auto-generated/llvm-overloaded-tests/vssseg3e32.c index dbce8bc59..bb8183c83 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg3e32.c +++ b/auto-generated/llvm-overloaded-tests/vssseg3e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg3e64.c b/auto-generated/llvm-overloaded-tests/vssseg3e64.c index 32d4977bc..d481537ab 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg3e64.c +++ b/auto-generated/llvm-overloaded-tests/vssseg3e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg4e16.c b/auto-generated/llvm-overloaded-tests/vssseg4e16.c index 9b4e16416..80d6e21c3 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg4e16.c +++ b/auto-generated/llvm-overloaded-tests/vssseg4e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg4e32.c b/auto-generated/llvm-overloaded-tests/vssseg4e32.c index dc0e89ad1..e6f45c6ba 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg4e32.c +++ b/auto-generated/llvm-overloaded-tests/vssseg4e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg4e64.c b/auto-generated/llvm-overloaded-tests/vssseg4e64.c index 38f19c503..f5644a76f 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg4e64.c +++ b/auto-generated/llvm-overloaded-tests/vssseg4e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg5e16.c b/auto-generated/llvm-overloaded-tests/vssseg5e16.c index 5c8157e6e..ec2e3c51e 100644 --- 
a/auto-generated/llvm-overloaded-tests/vssseg5e16.c +++ b/auto-generated/llvm-overloaded-tests/vssseg5e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg5e32.c b/auto-generated/llvm-overloaded-tests/vssseg5e32.c index 7f22fbbfd..67db8b2c5 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg5e32.c +++ b/auto-generated/llvm-overloaded-tests/vssseg5e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg5e64.c b/auto-generated/llvm-overloaded-tests/vssseg5e64.c index 789412209..b463bb0bf 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg5e64.c +++ b/auto-generated/llvm-overloaded-tests/vssseg5e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg6e16.c b/auto-generated/llvm-overloaded-tests/vssseg6e16.c index 9a020de8e..129a7bfdd 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg6e16.c +++ b/auto-generated/llvm-overloaded-tests/vssseg6e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg6e32.c b/auto-generated/llvm-overloaded-tests/vssseg6e32.c index 03027fccd..8c40ed263 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg6e32.c +++ b/auto-generated/llvm-overloaded-tests/vssseg6e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg6e64.c b/auto-generated/llvm-overloaded-tests/vssseg6e64.c index 3d009cb04..82b7a75d0 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg6e64.c +++ b/auto-generated/llvm-overloaded-tests/vssseg6e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git 
a/auto-generated/llvm-overloaded-tests/vssseg7e16.c b/auto-generated/llvm-overloaded-tests/vssseg7e16.c index ce786f3d2..a0035d7fb 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg7e16.c +++ b/auto-generated/llvm-overloaded-tests/vssseg7e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg7e32.c b/auto-generated/llvm-overloaded-tests/vssseg7e32.c index f326e9039..f552d57f7 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg7e32.c +++ b/auto-generated/llvm-overloaded-tests/vssseg7e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg7e64.c b/auto-generated/llvm-overloaded-tests/vssseg7e64.c index 3b805e414..2199614cb 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg7e64.c +++ b/auto-generated/llvm-overloaded-tests/vssseg7e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg8e16.c b/auto-generated/llvm-overloaded-tests/vssseg8e16.c index 656c67d7c..d3dcf76ae 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg8e16.c +++ b/auto-generated/llvm-overloaded-tests/vssseg8e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg8e32.c b/auto-generated/llvm-overloaded-tests/vssseg8e32.c index ed2192598..eb32d1ebb 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg8e32.c +++ b/auto-generated/llvm-overloaded-tests/vssseg8e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vssseg8e64.c b/auto-generated/llvm-overloaded-tests/vssseg8e64.c index f537e5e9b..ab0614ac0 100644 --- a/auto-generated/llvm-overloaded-tests/vssseg8e64.c +++ b/auto-generated/llvm-overloaded-tests/vssseg8e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh 
-disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxei16.c b/auto-generated/llvm-overloaded-tests/vsuxei16.c index 50c969724..ef2ced4ee 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxei16.c +++ b/auto-generated/llvm-overloaded-tests/vsuxei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxei32.c b/auto-generated/llvm-overloaded-tests/vsuxei32.c index 4873fd9d2..d5c96c863 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxei32.c +++ b/auto-generated/llvm-overloaded-tests/vsuxei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxei64.c b/auto-generated/llvm-overloaded-tests/vsuxei64.c index 7d8101f35..22f00cc16 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxei64.c +++ b/auto-generated/llvm-overloaded-tests/vsuxei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxei8.c b/auto-generated/llvm-overloaded-tests/vsuxei8.c index 1f8cfb34a..1efc48bb2 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxei8.c +++ b/auto-generated/llvm-overloaded-tests/vsuxei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg2ei16.c b/auto-generated/llvm-overloaded-tests/vsuxseg2ei16.c index f3120eff2..29252c640 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg2ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg2ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg2ei32.c b/auto-generated/llvm-overloaded-tests/vsuxseg2ei32.c index 701352e5d..a8f7220d4 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg2ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg2ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature 
+zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg2ei64.c b/auto-generated/llvm-overloaded-tests/vsuxseg2ei64.c index 5038f7711..4ad8bb8b9 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg2ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg2ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg2ei8.c b/auto-generated/llvm-overloaded-tests/vsuxseg2ei8.c index 2411b2327..eee7efb27 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg2ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg2ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg3ei16.c b/auto-generated/llvm-overloaded-tests/vsuxseg3ei16.c index 19ec63512..72358b80d 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg3ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg3ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg3ei32.c b/auto-generated/llvm-overloaded-tests/vsuxseg3ei32.c index 0d8ba91f4..047075c73 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg3ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg3ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg3ei64.c b/auto-generated/llvm-overloaded-tests/vsuxseg3ei64.c index 3d50ceea6..6b0bea1b3 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg3ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg3ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg3ei8.c b/auto-generated/llvm-overloaded-tests/vsuxseg3ei8.c index d0ac86932..82e515cc5 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg3ei8.c +++ 
b/auto-generated/llvm-overloaded-tests/vsuxseg3ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg4ei16.c b/auto-generated/llvm-overloaded-tests/vsuxseg4ei16.c index 349e97489..8b7b46575 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg4ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg4ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg4ei32.c b/auto-generated/llvm-overloaded-tests/vsuxseg4ei32.c index d133f60f2..77be371e3 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg4ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg4ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg4ei64.c b/auto-generated/llvm-overloaded-tests/vsuxseg4ei64.c index b6f36564c..7bda9e24b 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg4ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg4ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg4ei8.c b/auto-generated/llvm-overloaded-tests/vsuxseg4ei8.c index a877dc94b..7659e59d0 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg4ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg4ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg5ei16.c b/auto-generated/llvm-overloaded-tests/vsuxseg5ei16.c index f59d83ceb..0a1811e1c 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg5ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg5ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git 
a/auto-generated/llvm-overloaded-tests/vsuxseg5ei32.c b/auto-generated/llvm-overloaded-tests/vsuxseg5ei32.c index 55329980b..5d4154fcb 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg5ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg5ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg5ei64.c b/auto-generated/llvm-overloaded-tests/vsuxseg5ei64.c index 4713f4d87..50ffed379 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg5ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg5ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg5ei8.c b/auto-generated/llvm-overloaded-tests/vsuxseg5ei8.c index 9f94327a8..1e35c8213 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg5ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg5ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg6ei16.c b/auto-generated/llvm-overloaded-tests/vsuxseg6ei16.c index 74432c2d8..84358a7c8 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg6ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg6ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg6ei32.c b/auto-generated/llvm-overloaded-tests/vsuxseg6ei32.c index c240ec510..bfdb9c6aa 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg6ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg6ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg6ei64.c b/auto-generated/llvm-overloaded-tests/vsuxseg6ei64.c index 1bb65ab8b..8a5463a75 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg6ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg6ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ 
+// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg6ei8.c b/auto-generated/llvm-overloaded-tests/vsuxseg6ei8.c index 80d482387..83f1fe8fc 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg6ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg6ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg7ei16.c b/auto-generated/llvm-overloaded-tests/vsuxseg7ei16.c index 61983c35c..251f565d7 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg7ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg7ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg7ei32.c b/auto-generated/llvm-overloaded-tests/vsuxseg7ei32.c index ce187e2c6..f0f316ed7 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg7ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg7ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg7ei64.c b/auto-generated/llvm-overloaded-tests/vsuxseg7ei64.c index b6602d5b1..28c8902ba 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg7ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg7ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg7ei8.c b/auto-generated/llvm-overloaded-tests/vsuxseg7ei8.c index b94f60512..f474b10b5 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg7ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg7ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg8ei16.c b/auto-generated/llvm-overloaded-tests/vsuxseg8ei16.c index 6111162b9..db6cd294e 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg8ei16.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg8ei16.c @@ -1,6 +1,6 @@ // REQUIRES: 
riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg8ei32.c b/auto-generated/llvm-overloaded-tests/vsuxseg8ei32.c index eb5f7ee1e..a10f4167c 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg8ei32.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg8ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg8ei64.c b/auto-generated/llvm-overloaded-tests/vsuxseg8ei64.c index 16d60f754..271bfbcdf 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg8ei64.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg8ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vsuxseg8ei8.c b/auto-generated/llvm-overloaded-tests/vsuxseg8ei8.c index 3ae76a7d5..16335b590 100644 --- a/auto-generated/llvm-overloaded-tests/vsuxseg8ei8.c +++ b/auto-generated/llvm-overloaded-tests/vsuxseg8ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vwmacc.c b/auto-generated/llvm-overloaded-tests/vwmacc.c index 21b1d61b7..3466cccb3 100644 --- a/auto-generated/llvm-overloaded-tests/vwmacc.c +++ b/auto-generated/llvm-overloaded-tests/vwmacc.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vwmaccsu.c b/auto-generated/llvm-overloaded-tests/vwmaccsu.c index 4f05962d0..50b6a51df 100644 --- a/auto-generated/llvm-overloaded-tests/vwmaccsu.c +++ b/auto-generated/llvm-overloaded-tests/vwmaccsu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vwmaccu.c b/auto-generated/llvm-overloaded-tests/vwmaccu.c index b0d8ce766..05b09ea69 100644 --- 
a/auto-generated/llvm-overloaded-tests/vwmaccu.c +++ b/auto-generated/llvm-overloaded-tests/vwmaccu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/llvm-overloaded-tests/vwmaccus.c b/auto-generated/llvm-overloaded-tests/vwmaccus.c index db831127c..b197b939e 100644 --- a/auto-generated/llvm-overloaded-tests/vwmaccus.c +++ b/auto-generated/llvm-overloaded-tests/vwmaccus.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/overloaded-api-testing/vcpop.c b/auto-generated/overloaded-api-testing/vcpop.c index f52439d1f..aeeeeb878 100644 --- a/auto-generated/overloaded-api-testing/vcpop.c +++ b/auto-generated/overloaded-api-testing/vcpop.c @@ -1,58 +1,58 @@ #include <stdint.h> #include <riscv_vector.h> -unsigned int test_vcpop_m_b1(vbool1_t vs2, size_t vl) { +unsigned long test_vcpop_m_b1(vbool1_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b2(vbool2_t vs2, size_t vl) { +unsigned long test_vcpop_m_b2(vbool2_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b4(vbool4_t vs2, size_t vl) { +unsigned long test_vcpop_m_b4(vbool4_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b8(vbool8_t vs2, size_t vl) { +unsigned long test_vcpop_m_b8(vbool8_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b16(vbool16_t vs2, size_t vl) { +unsigned long test_vcpop_m_b16(vbool16_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b32(vbool32_t vs2, size_t vl) { +unsigned long test_vcpop_m_b32(vbool32_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b64(vbool64_t vs2, size_t vl) { +unsigned long test_vcpop_m_b64(vbool64_t vs2, size_t vl) { return __riscv_vcpop(vs2, vl); } -unsigned int test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { +unsigned long test_vcpop_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { +unsigned long test_vcpop_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { +unsigned long test_vcpop_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { +unsigned long test_vcpop_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { +unsigned long test_vcpop_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { +unsigned long test_vcpop_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } -unsigned int test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, 
size_t vl) { +unsigned long test_vcpop_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { return __riscv_vcpop(vm, vs2, vl); } diff --git a/auto-generated/overloaded-api-testing/vfirst.c b/auto-generated/overloaded-api-testing/vfirst.c index 72fdc0a2a..aa45333f4 100644 --- a/auto-generated/overloaded-api-testing/vfirst.c +++ b/auto-generated/overloaded-api-testing/vfirst.c @@ -1,58 +1,58 @@ #include <stdint.h> #include <riscv_vector.h> -int test_vfirst_m_b1(vbool1_t vs2, size_t vl) { +long test_vfirst_m_b1(vbool1_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b2(vbool2_t vs2, size_t vl) { +long test_vfirst_m_b2(vbool2_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b4(vbool4_t vs2, size_t vl) { +long test_vfirst_m_b4(vbool4_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b8(vbool8_t vs2, size_t vl) { +long test_vfirst_m_b8(vbool8_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b16(vbool16_t vs2, size_t vl) { +long test_vfirst_m_b16(vbool16_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b32(vbool32_t vs2, size_t vl) { +long test_vfirst_m_b32(vbool32_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b64(vbool64_t vs2, size_t vl) { +long test_vfirst_m_b64(vbool64_t vs2, size_t vl) { return __riscv_vfirst(vs2, vl); } -int test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { +long test_vfirst_m_b1_m(vbool1_t vm, vbool1_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { +long test_vfirst_m_b2_m(vbool2_t vm, vbool2_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { +long test_vfirst_m_b4_m(vbool4_t vm, vbool4_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { +long test_vfirst_m_b8_m(vbool8_t vm, vbool8_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { +long test_vfirst_m_b16_m(vbool16_t vm, vbool16_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { +long test_vfirst_m_b32_m(vbool32_t vm, vbool32_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } -int test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { +long test_vfirst_m_b64_m(vbool64_t vm, vbool64_t vs2, size_t vl) { return __riscv_vfirst(vm, vs2, vl); } diff --git a/auto-generated/overloaded_intrinsic_funcs.adoc b/auto-generated/overloaded_intrinsic_funcs.adoc index c0185119a..0a04e29f9 100644 --- a/auto-generated/overloaded_intrinsic_funcs.adoc +++ b/auto-generated/overloaded_intrinsic_funcs.adoc @@ -28761,6 +28761,7 @@ vuint64m8_t __riscv_vmv_v(vuint64m8_t vs1, size_t vl); [[overloaded-vector-single-width-saturating-add-and-subtract]] ==== Vector Single-Width Saturating Add and Subtract Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -29955,6 +29956,7 @@ vuint64m8_t __riscv_vasubu(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, [[overloaded-vector-single-width-fractional-multiply-with-rounding-and-saturation]] ==== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value.
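The `vxsat` notes being added above have a practical consequence: portable code can no longer read the `vxsat` CSR to learn whether a fixed-point intrinsic saturated. A minimal sketch of detecting saturation without `vxsat`, using only the overloaded `__riscv_vsadd`, `__riscv_vadd`, and `__riscv_vmsne` intrinsics documented in this spec (the helper name `saturating_add_flagged` is hypothetical):

[,c]
----
#include <riscv_vector.h>

// After __riscv_vsadd, vxsat holds an UNSPECIFIED value, so detect
// saturation manually: for saturating int8 addition, an element
// saturated exactly when the clamped sum differs from the wrapping sum.
static vint8m1_t saturating_add_flagged(vint8m1_t vs2, vint8m1_t vs1,
                                        size_t vl, vbool8_t *saturated) {
  vint8m1_t sat = __riscv_vsadd(vs2, vs1, vl);  // clamps to [-128, 127]
  vint8m1_t wrap = __riscv_vadd(vs2, vs1, vl);  // wraps modulo 2^8
  *saturated = __riscv_vmsne(sat, wrap, vl);    // per-element saturation flag
  return sat;
}
----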
[,c] ---- @@ -30499,6 +30501,7 @@ vuint64m8_t __riscv_vssrl(vbool8_t vm, vuint64m8_t vs2, size_t rs1, [[overloaded-vector-narrowing-fixed-point-clip]] ==== Vector Narrowing Fixed-Point Clip Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -38718,21 +38721,21 @@ vbool64_t __riscv_vmnot(vbool64_t vs, size_t vl); [,c] ---- -unsigned int __riscv_vcpop(vbool1_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool2_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool4_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool8_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool16_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool32_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool64_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool1_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool2_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool4_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool8_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool16_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool32_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool64_t vs2, size_t vl); // masked functions -unsigned int __riscv_vcpop(vbool1_t vm, vbool1_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool2_t vm, vbool2_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool4_t vm, vbool4_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool8_t vm, vbool8_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool16_t vm, vbool16_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool32_t vm, vbool32_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool64_t vm, vbool64_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool1_t vm, vbool1_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool2_t vm, vbool2_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool4_t vm, vbool4_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool8_t vm, vbool8_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool16_t vm, vbool16_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool32_t vm, vbool32_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool64_t vm, vbool64_t vs2, size_t vl); ---- [[overloaded-vfirst-find-first-set-mask-bit]] @@ -38740,21 +38743,21 @@ unsigned int __riscv_vcpop(vbool64_t vm, vbool64_t vs2, size_t vl); [,c] ---- -int __riscv_vfirst(vbool1_t vs2, size_t vl); -int __riscv_vfirst(vbool2_t vs2, size_t vl); -int __riscv_vfirst(vbool4_t vs2, size_t vl); -int __riscv_vfirst(vbool8_t vs2, size_t vl); -int __riscv_vfirst(vbool16_t vs2, size_t vl); -int __riscv_vfirst(vbool32_t vs2, size_t vl); -int __riscv_vfirst(vbool64_t vs2, size_t vl); +long __riscv_vfirst(vbool1_t vs2, size_t vl); +long __riscv_vfirst(vbool2_t vs2, size_t vl); +long __riscv_vfirst(vbool4_t vs2, size_t vl); +long __riscv_vfirst(vbool8_t vs2, size_t vl); +long __riscv_vfirst(vbool16_t vs2, size_t vl); +long __riscv_vfirst(vbool32_t vs2, size_t vl); +long __riscv_vfirst(vbool64_t vs2, size_t vl); // masked functions -int __riscv_vfirst(vbool1_t vm, vbool1_t vs2, size_t vl); -int __riscv_vfirst(vbool2_t vm, vbool2_t vs2, size_t vl); -int __riscv_vfirst(vbool4_t vm, vbool4_t vs2, size_t vl); -int __riscv_vfirst(vbool8_t vm, vbool8_t vs2, size_t vl); -int __riscv_vfirst(vbool16_t vm, vbool16_t vs2, size_t vl); -int __riscv_vfirst(vbool32_t vm, vbool32_t vs2, size_t vl); -int __riscv_vfirst(vbool64_t vm, vbool64_t vs2, size_t vl); +long __riscv_vfirst(vbool1_t vm, vbool1_t vs2, size_t vl); +long __riscv_vfirst(vbool2_t vm, vbool2_t vs2, size_t vl); +long __riscv_vfirst(vbool4_t vm, vbool4_t vs2, 
size_t vl); +long __riscv_vfirst(vbool8_t vm, vbool8_t vs2, size_t vl); +long __riscv_vfirst(vbool16_t vm, vbool16_t vs2, size_t vl); +long __riscv_vfirst(vbool32_t vm, vbool32_t vs2, size_t vl); +long __riscv_vfirst(vbool64_t vm, vbool64_t vs2, size_t vl); ---- [[overloaded-vmsbfm-set-before-first-mask-bit]] diff --git a/auto-generated/overloaded_intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc b/auto-generated/overloaded_intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc index 59bcf0473..f79388153 100644 --- a/auto-generated/overloaded_intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc +++ b/auto-generated/overloaded_intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc @@ -3,6 +3,7 @@ [[overloaded-vector-single-width-saturating-add-and-subtract]] ==== Vector Single-Width Saturating Add and Subtract Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -1197,6 +1198,7 @@ vuint64m8_t __riscv_vasubu(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, [[overloaded-vector-single-width-fractional-multiply-with-rounding-and-saturation]] ==== Vector Single-Width Fractional Multiply with Rounding and SaturationIntrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -1741,6 +1743,7 @@ vuint64m8_t __riscv_vssrl(vbool8_t vm, vuint64m8_t vs2, size_t rs1, [[overloaded-vector-narrowing-fixed-point-clip]] ==== Vector Narrowing Fixed-Point Clip Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- diff --git a/auto-generated/overloaded_intrinsic_funcs/06_vector_mask_intrinsics.adoc b/auto-generated/overloaded_intrinsic_funcs/06_vector_mask_intrinsics.adoc index 2cb224185..99968b005 100644 --- a/auto-generated/overloaded_intrinsic_funcs/06_vector_mask_intrinsics.adoc +++ b/auto-generated/overloaded_intrinsic_funcs/06_vector_mask_intrinsics.adoc @@ -83,21 +83,21 @@ vbool64_t __riscv_vmnot(vbool64_t vs, size_t vl); [,c] ---- -unsigned int __riscv_vcpop(vbool1_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool2_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool4_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool8_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool16_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool32_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool64_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool1_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool2_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool4_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool8_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool16_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool32_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool64_t vs2, size_t vl); // masked functions -unsigned int __riscv_vcpop(vbool1_t vm, vbool1_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool2_t vm, vbool2_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool4_t vm, vbool4_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool8_t vm, vbool8_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool16_t vm, vbool16_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool32_t vm, vbool32_t vs2, size_t vl); -unsigned int __riscv_vcpop(vbool64_t vm, vbool64_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool1_t vm, vbool1_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool2_t vm, vbool2_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool4_t vm, vbool4_t vs2, size_t vl); +unsigned long 
__riscv_vcpop(vbool8_t vm, vbool8_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool16_t vm, vbool16_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool32_t vm, vbool32_t vs2, size_t vl); +unsigned long __riscv_vcpop(vbool64_t vm, vbool64_t vs2, size_t vl); ---- [[overloaded-vfirst-find-first-set-mask-bit]] @@ -105,21 +105,21 @@ unsigned int __riscv_vcpop(vbool64_t vm, vbool64_t vs2, size_t vl); [,c] ---- -int __riscv_vfirst(vbool1_t vs2, size_t vl); -int __riscv_vfirst(vbool2_t vs2, size_t vl); -int __riscv_vfirst(vbool4_t vs2, size_t vl); -int __riscv_vfirst(vbool8_t vs2, size_t vl); -int __riscv_vfirst(vbool16_t vs2, size_t vl); -int __riscv_vfirst(vbool32_t vs2, size_t vl); -int __riscv_vfirst(vbool64_t vs2, size_t vl); +long __riscv_vfirst(vbool1_t vs2, size_t vl); +long __riscv_vfirst(vbool2_t vs2, size_t vl); +long __riscv_vfirst(vbool4_t vs2, size_t vl); +long __riscv_vfirst(vbool8_t vs2, size_t vl); +long __riscv_vfirst(vbool16_t vs2, size_t vl); +long __riscv_vfirst(vbool32_t vs2, size_t vl); +long __riscv_vfirst(vbool64_t vs2, size_t vl); // masked functions -int __riscv_vfirst(vbool1_t vm, vbool1_t vs2, size_t vl); -int __riscv_vfirst(vbool2_t vm, vbool2_t vs2, size_t vl); -int __riscv_vfirst(vbool4_t vm, vbool4_t vs2, size_t vl); -int __riscv_vfirst(vbool8_t vm, vbool8_t vs2, size_t vl); -int __riscv_vfirst(vbool16_t vm, vbool16_t vs2, size_t vl); -int __riscv_vfirst(vbool32_t vm, vbool32_t vs2, size_t vl); -int __riscv_vfirst(vbool64_t vm, vbool64_t vs2, size_t vl); +long __riscv_vfirst(vbool1_t vm, vbool1_t vs2, size_t vl); +long __riscv_vfirst(vbool2_t vm, vbool2_t vs2, size_t vl); +long __riscv_vfirst(vbool4_t vm, vbool4_t vs2, size_t vl); +long __riscv_vfirst(vbool8_t vm, vbool8_t vs2, size_t vl); +long __riscv_vfirst(vbool16_t vm, vbool16_t vs2, size_t vl); +long __riscv_vfirst(vbool32_t vm, vbool32_t vs2, size_t vl); +long __riscv_vfirst(vbool64_t vm, vbool64_t vs2, size_t vl); ---- [[overloaded-vmsbfm-set-before-first-mask-bit]] diff --git a/auto-generated/policy_funcs/intrinsic_funcs.adoc b/auto-generated/policy_funcs/intrinsic_funcs.adoc index 4140c7dd5..4856fb540 100644 --- a/auto-generated/policy_funcs/intrinsic_funcs.adoc +++ b/auto-generated/policy_funcs/intrinsic_funcs.adoc @@ -61366,6 +61366,7 @@ vuint64m8_t __riscv_vmv_v_x_u64m8_tu(vuint64m8_t vd, uint64_t rs1, size_t vl); [[policy-variant-vector-single-width-saturating-add-and-subtract]] ==== Vector Single-Width Saturating Add and Subtract Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -65116,6 +65117,7 @@ vuint64m8_t __riscv_vasubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, [[policy-variant-vector-single-width-fractional-multiply-with-rounding-and-saturation]] ==== Vector Single-Width Fractional Multiply with Rounding and SaturationIntrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -66607,6 +66609,7 @@ vuint64m8_t __riscv_vssrl_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, [[policy-variant-vector-narrowing-fixed-point-clip]] ==== Vector Narrowing Fixed-Point Clip Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. 
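Note on the `vxsat` additions above: since `vxsat` is UNSPECIFIED after these fixed-point intrinsics, portable code cannot read the CSR afterward to detect clipping and has to derive saturation status itself. A minimal sketch of one way to do that, assuming an RV64 target with `<riscv_vector.h>`; `sadd_saturated_i8m1` is a hypothetical helper, not part of the specified API, and the widen-and-compare approach is just one possible workaround.

[,c]
----
#include <riscv_vector.h>
#include <stdbool.h>
#include <stddef.h>

// Detect whether a saturating add clipped any lane without reading vxsat,
// whose value is UNSPECIFIED after __riscv_vsadd. The exact sums are
// recomputed in 16 bits, where 8-bit inputs cannot overflow.
static bool sadd_saturated_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
  vint8m1_t sat = __riscv_vsadd_vv_i8m1(vs2, vs1, vl);
  vint16m2_t wide = __riscv_vwadd_vv_i16m2(vs2, vs1, vl);
  vint16m2_t satw = __riscv_vsext_vf2_i16m2(sat, vl);
  // Lanes where the widened exact sum differs from the saturated result
  // are exactly the lanes that clipped.
  vbool8_t clipped = __riscv_vmsne_vv_i16m2_b8(wide, satw, vl);
  return __riscv_vcpop_m_b8(clipped, vl) != 0;
}
----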
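Related usage note for the `vcpop`/`vfirst` hunks above: the return types widen from `unsigned int`/`int` to `unsigned long`/`long`, i.e. to XLEN-sized integers, so mask-bit counts and element indices are not truncated on RV64. A short usage sketch under that assumption; `first_active_lane` is an illustrative helper, not part of the API.

[,c]
----
#include <riscv_vector.h>
#include <stddef.h>

// Count the active lanes of a mask and return the index of the first one.
// With the widened return types, both results are XLEN-sized on RV64.
static long first_active_lane(vbool8_t mask, size_t vl) {
  unsigned long active = __riscv_vcpop_m_b8(mask, vl); // number of set bits
  long first = __riscv_vfirst_m_b8(mask, vl);          // -1 when no bit is set
  // vfirst already yields -1 for an all-zero mask; the check is illustrative.
  return (active != 0) ? first : -1;
}
----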
[,c] ---- diff --git a/auto-generated/policy_funcs/intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc b/auto-generated/policy_funcs/intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc index 5e8183297..7b1985e13 100644 --- a/auto-generated/policy_funcs/intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc +++ b/auto-generated/policy_funcs/intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc @@ -3,6 +3,7 @@ [[policy-variant-vector-single-width-saturating-add-and-subtract]] ==== Vector Single-Width Saturating Add and Subtract Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -3753,6 +3754,7 @@ vuint64m8_t __riscv_vasubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, [[policy-variant-vector-single-width-fractional-multiply-with-rounding-and-saturation]] ==== Vector Single-Width Fractional Multiply with Rounding and SaturationIntrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -5244,6 +5246,7 @@ vuint64m8_t __riscv_vssrl_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, [[policy-variant-vector-narrowing-fixed-point-clip]] ==== Vector Narrowing Fixed-Point Clip Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- diff --git a/auto-generated/policy_funcs/llvm-api-tests/vaadd.c b/auto-generated/policy_funcs/llvm-api-tests/vaadd.c index b6d6dcc2e..3029151d9 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vaadd.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vaadd.c @@ -5,706 +5,891 @@ #include -vint8mf8_t test_vaadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vaadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vaadd_vv_i8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vaadd_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vaadd_vx_i8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vaadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vaadd_vv_i8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vaadd_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vaadd_vx_i8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vaadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vaadd_vv_i8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vaadd_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vaadd_vx_i8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vaadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vaadd_vv_i8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { 
+vint8m1_t test_vaadd_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vaadd_vx_i8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vaadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vaadd_vv_i8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vaadd_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vaadd_vx_i8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vaadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vaadd_vv_i8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vaadd_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vaadd_vx_i8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vaadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vaadd_vv_i8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vaadd_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vaadd_vx_i8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vaadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vaadd_vv_i16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vaadd_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vaadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vaadd_vv_i16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vaadd_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vaadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vaadd_vv_i16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vaadd_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vaadd_vx_i16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vaadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vaadd_vv_i16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t 
test_vaadd_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vaadd_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vaadd_vx_i16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vaadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vaadd_vv_i16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vaadd_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vaadd_vx_i16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vaadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vaadd_vv_i16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vaadd_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vaadd_vx_i16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vaadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vaadd_vv_i32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vaadd_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vaadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vaadd_vv_i32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vaadd_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vaadd_vx_i32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vaadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vaadd_vv_i32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vaadd_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vaadd_vx_i32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vaadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vaadd_vv_i32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vaadd_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vaadd_vx_i32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vaadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t 
vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vaadd_vv_i32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vaadd_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vaadd_vx_i32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vaadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vaadd_vv_i64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vaadd_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vaadd_vx_i64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vaadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vaadd_vv_i64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vaadd_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vaadd_vx_i64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vaadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vaadd_vv_i64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vaadd_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vaadd_vx_i64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vaadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vaadd_vv_i64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vaadd_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vaadd_vx_i64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vaadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vaadd_vv_i8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vaadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vaadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vaadd_vv_i8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vaadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) 
{ return __riscv_vaadd_vx_i8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vaadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vaadd_vv_i8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vaadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vaadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vaadd_vv_i8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vaadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vaadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vaadd_vv_i8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vaadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vaadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vaadd_vv_i8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vaadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vaadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vaadd_vv_i8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vaadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vaadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vaadd_vv_i16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vaadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } 
-vint16mf2_t test_vaadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vaadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vaadd_vv_i16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vaadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vaadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vaadd_vv_i16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vaadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vaadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vaadd_vv_i16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vaadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vaadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vaadd_vv_i16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vaadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vaadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vaadd_vv_i16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vaadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vaadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vaadd_vv_i32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vaadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32mf2_tum(vm, vd, vs2, 
rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vaadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vaadd_vv_i32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vaadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vaadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vaadd_vv_i32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vaadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vaadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vaadd_vv_i32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vaadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vaadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vaadd_vv_i32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vaadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vaadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vaadd_vv_i64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vaadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vaadd_vx_i64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vaadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vaadd_vv_i64m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vaadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vaadd_vx_i64m2_tum(vm, vd, vs2, rs1, 
__RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vaadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vaadd_vv_i64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vaadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vaadd_vx_i64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vaadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vaadd_vv_i64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vaadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vaadd_vx_i64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vaadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vaadd_vv_i8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vaadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vaadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vaadd_vv_i8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vaadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vaadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vaadd_vv_i8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vaadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vaadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vaadd_vv_i8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vaadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8m1_tumu(vm, vd, vs2, rs1, 
__RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vaadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vaadd_vv_i8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vaadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vaadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vaadd_vv_i8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vaadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vaadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vaadd_vv_i8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vaadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vaadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vaadd_vv_i16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vaadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vaadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vaadd_vv_i16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vaadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vaadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vaadd_vv_i16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vaadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16m1_tumu(vm, vd, vs2, rs1, 
__RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vaadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vaadd_vv_i16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vaadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vaadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vaadd_vv_i16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vaadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vaadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vaadd_vv_i16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vaadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vaadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vaadd_vv_i32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vaadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vaadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vaadd_vv_i32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vaadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vaadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vaadd_vv_i32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vaadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return 
__riscv_vaadd_vx_i32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vaadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vaadd_vv_i32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vaadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vaadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vaadd_vv_i32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vaadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vaadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vaadd_vv_i64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vaadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vaadd_vx_i64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vaadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vaadd_vv_i64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vaadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vaadd_vx_i64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vaadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vaadd_vv_i64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vaadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vaadd_vx_i64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vaadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vaadd_vv_i64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vaadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t 
rs1, size_t vl) { return __riscv_vaadd_vx_i64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vaadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vaadd_vv_i8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vaadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vaadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vaadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vaadd_vv_i8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vaadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vaadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vaadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vaadd_vv_i8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vaadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vaadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vaadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vaadd_vv_i8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vaadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vaadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vaadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vaadd_vv_i8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vaadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vaadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vaadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vaadd_vv_i8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vaadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vaadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t 
test_vaadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vaadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vaadd_vv_i8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vaadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vaadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vaadd_vx_i8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vaadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vaadd_vv_i16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vaadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vaadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vaadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vaadd_vv_i16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vaadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vaadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vaadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vaadd_vv_i16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vaadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vaadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vaadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vaadd_vv_i16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vaadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vaadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vaadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vaadd_vv_i16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vaadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vaadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t 
test_vaadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vaadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vaadd_vv_i16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vaadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vaadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vaadd_vx_i16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vaadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vaadd_vv_i32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vaadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vaadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vaadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vaadd_vv_i32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vaadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vaadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vaadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vaadd_vv_i32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vaadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vaadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vaadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vaadd_vv_i32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vaadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vaadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vaadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vaadd_vv_i32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vaadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vaadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vaadd_vx_i32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t 
test_vaadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vaadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vaadd_vv_i64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vaadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vaadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vaadd_vx_i64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vaadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vaadd_vv_i64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vaadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vaadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vaadd_vx_i64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vaadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vaadd_vv_i64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vaadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vaadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vaadd_vx_i64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vaadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vaadd_vv_i64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vaadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vaadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vaadd_vx_i64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vaaddu.c b/auto-generated/policy_funcs/llvm-api-tests/vaaddu.c index 60f5d6841..b663cefdd 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vaaddu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vaaddu.c @@ -5,706 +5,957 @@ #include -vuint8mf8_t test_vaaddu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vaaddu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vaaddu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vaaddu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t 
test_vaaddu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vaaddu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vaaddu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vaaddu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vaaddu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vaaddu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vaaddu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vaaddu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vaaddu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vaaddu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vaaddu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vaaddu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vaaddu_vv_u16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vaaddu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vaaddu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, 
size_t vl) { return __riscv_vaaddu_vv_u16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vaaddu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vaaddu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vaaddu_vv_u16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vaaddu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vaaddu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vaaddu_vv_u16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vaaddu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vaaddu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vaaddu_vv_u16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vaaddu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vaaddu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vaaddu_vv_u16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vaaddu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vaaddu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vaaddu_vv_u32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vaaddu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vaaddu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vaaddu_vv_u32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vaaddu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + 
uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vaaddu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vaaddu_vv_u32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vaaddu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vaaddu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vaaddu_vv_u32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vaaddu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vaaddu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vaaddu_vv_u32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vaaddu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vaaddu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vaaddu_vv_u64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vaaddu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vaaddu_vx_u64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vaaddu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vaaddu_vv_u64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vaaddu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vaaddu_vx_u64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vaaddu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vaaddu_vv_u64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vaaddu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vaaddu_vx_u64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vaaddu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, 
size_t vl) { return __riscv_vaaddu_vv_u64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vaaddu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vaaddu_vx_u64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vaaddu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vaaddu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vaaddu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vaaddu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vaaddu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vaaddu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vaaddu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vaaddu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vaaddu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vaaddu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vaaddu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, 
vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vaaddu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vaaddu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vaaddu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vaaddu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vaaddu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vaaddu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vaaddu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vaaddu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vaaddu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vaaddu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vaaddu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vv_u16m4_tum(vbool4_t 
vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vaaddu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vaaddu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vaaddu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vaaddu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vaaddu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vaaddu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vaaddu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vaaddu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vaaddu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vaaddu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vaaddu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vaaddu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, 
uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vaaddu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vaaddu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vaaddu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vaaddu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vaaddu_vx_u64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vaaddu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u64m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vaaddu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vaaddu_vx_u64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vaaddu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vaaddu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vaaddu_vx_u64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vaaddu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vaaddu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vaaddu_vx_u64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vaaddu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vx_u8mf8_tumu(vbool64_t vm, 
vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vaaddu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vaaddu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vaaddu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vaaddu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vaaddu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vaaddu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vaaddu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vaaddu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vaaddu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vaaddu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vaaddu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vaaddu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8m8_tumu(vm, vd, vs2, vs1, 
__RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vaaddu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vaaddu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vaaddu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vaaddu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vaaddu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vaaddu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vaaddu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vaaddu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vaaddu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vaaddu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vaaddu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t 
vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vaaddu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vaaddu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vaaddu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vaaddu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vaaddu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vaaddu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vaaddu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vaaddu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vaaddu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vaaddu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vaaddu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vaaddu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + 
vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vaaddu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vaaddu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vaaddu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vaaddu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vaaddu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vaaddu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vaaddu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vaaddu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vaaddu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vaaddu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vaaddu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vaaddu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vaaddu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t 
test_vaaddu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vaaddu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vaaddu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vaaddu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vaaddu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vaaddu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vaaddu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vaaddu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vaaddu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vaaddu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vaaddu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vaaddu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vaaddu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vaaddu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vaaddu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vaaddu_vv_u8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vaaddu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vaaddu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vaaddu_vx_u8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vaaddu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vaaddu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } 
-vuint16mf4_t test_vaaddu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vaaddu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vaaddu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vaaddu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vaaddu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vaaddu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vaaddu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vaaddu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vaaddu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vaaddu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vaaddu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vaaddu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vaaddu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vaaddu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vaaddu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vaaddu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vaaddu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vaaddu_vx_u16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vaaddu_vv_u32mf2_mu(vbool64_t vm, 
vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vaaddu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vaaddu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vaaddu_vx_u32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vaaddu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vaaddu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vaaddu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vaaddu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vaaddu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vaaddu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vaaddu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vaaddu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vaaddu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vaaddu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vaaddu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vaaddu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vaaddu_vx_u32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vaaddu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vaaddu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vaaddu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vaaddu_vx_u64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vv_u64m2_mu(vbool32_t vm, 
vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vaaddu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vaaddu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vaaddu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vaaddu_vx_u64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vaaddu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vaaddu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vaaddu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vaaddu_vx_u64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vaaddu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vaaddu_vv_u64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vaaddu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vaaddu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vaaddu_vx_u64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vadc.c b/auto-generated/policy_funcs/llvm-api-tests/vadc.c index 1d02699f4..64f455393 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vadc.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vadc.c @@ -5,354 +5,445 @@ #include <riscv_vector.h> -vint8mf8_t test_vadc_vvm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) { +vint8mf8_t test_vadc_vvm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + vbool64_t v0, size_t vl) { return __riscv_vadc_vvm_i8mf8_tu(vd, vs2, vs1, v0, vl); } -vint8mf8_t test_vadc_vxm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) { +vint8mf8_t test_vadc_vxm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + vbool64_t v0, size_t vl) { return __riscv_vadc_vxm_i8mf8_tu(vd, vs2, rs1, v0, vl); } -vint8mf4_t test_vadc_vvm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { +vint8mf4_t test_vadc_vvm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + vbool32_t v0, size_t vl) { return __riscv_vadc_vvm_i8mf4_tu(vd, vs2, vs1, v0, vl); } -vint8mf4_t test_vadc_vxm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { +vint8mf4_t test_vadc_vxm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + vbool32_t v0, size_t vl) { return __riscv_vadc_vxm_i8mf4_tu(vd, vs2, rs1, v0, vl); } -vint8mf2_t test_vadc_vvm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { +vint8mf2_t test_vadc_vvm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + vbool16_t v0, size_t vl) { return __riscv_vadc_vvm_i8mf2_tu(vd, vs2, vs1, v0, vl); } -vint8mf2_t test_vadc_vxm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) {
+vint8mf2_t test_vadc_vxm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vadc_vxm_i8mf2_tu(vd, vs2, rs1, v0, vl); } -vint8m1_t test_vadc_vvm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { +vint8m1_t test_vadc_vvm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + vbool8_t v0, size_t vl) { return __riscv_vadc_vvm_i8m1_tu(vd, vs2, vs1, v0, vl); } -vint8m1_t test_vadc_vxm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { +vint8m1_t test_vadc_vxm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vadc_vxm_i8m1_tu(vd, vs2, rs1, v0, vl); } -vint8m2_t test_vadc_vvm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { +vint8m2_t test_vadc_vvm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + vbool4_t v0, size_t vl) { return __riscv_vadc_vvm_i8m2_tu(vd, vs2, vs1, v0, vl); } -vint8m2_t test_vadc_vxm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { +vint8m2_t test_vadc_vxm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + vbool4_t v0, size_t vl) { return __riscv_vadc_vxm_i8m2_tu(vd, vs2, rs1, v0, vl); } -vint8m4_t test_vadc_vvm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { +vint8m4_t test_vadc_vvm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + vbool2_t v0, size_t vl) { return __riscv_vadc_vvm_i8m4_tu(vd, vs2, vs1, v0, vl); } -vint8m4_t test_vadc_vxm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { +vint8m4_t test_vadc_vxm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + vbool2_t v0, size_t vl) { return __riscv_vadc_vxm_i8m4_tu(vd, vs2, rs1, v0, vl); } -vint8m8_t test_vadc_vvm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { +vint8m8_t test_vadc_vvm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + vbool1_t v0, size_t vl) { return __riscv_vadc_vvm_i8m8_tu(vd, vs2, vs1, v0, vl); } -vint8m8_t test_vadc_vxm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { +vint8m8_t test_vadc_vxm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + vbool1_t v0, size_t vl) { return __riscv_vadc_vxm_i8m8_tu(vd, vs2, rs1, v0, vl); } -vint16mf4_t test_vadc_vvm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { +vint16mf4_t test_vadc_vvm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, vbool64_t v0, size_t vl) { return __riscv_vadc_vvm_i16mf4_tu(vd, vs2, vs1, v0, vl); } -vint16mf4_t test_vadc_vxm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { +vint16mf4_t test_vadc_vxm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + int16_t rs1, vbool64_t v0, size_t vl) { return __riscv_vadc_vxm_i16mf4_tu(vd, vs2, rs1, v0, vl); } -vint16mf2_t test_vadc_vvm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { +vint16mf2_t test_vadc_vvm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, vbool32_t v0, size_t vl) { return __riscv_vadc_vvm_i16mf2_tu(vd, vs2, vs1, v0, vl); } -vint16mf2_t test_vadc_vxm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { +vint16mf2_t test_vadc_vxm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + int16_t rs1, vbool32_t v0, size_t vl) { return __riscv_vadc_vxm_i16mf2_tu(vd, vs2, rs1, v0, vl); } -vint16m1_t test_vadc_vvm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { +vint16m1_t 
test_vadc_vvm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + vbool16_t v0, size_t vl) { return __riscv_vadc_vvm_i16m1_tu(vd, vs2, vs1, v0, vl); } -vint16m1_t test_vadc_vxm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { +vint16m1_t test_vadc_vxm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vadc_vxm_i16m1_tu(vd, vs2, rs1, v0, vl); } -vint16m2_t test_vadc_vvm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { +vint16m2_t test_vadc_vvm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + vbool8_t v0, size_t vl) { return __riscv_vadc_vvm_i16m2_tu(vd, vs2, vs1, v0, vl); } -vint16m2_t test_vadc_vxm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { +vint16m2_t test_vadc_vxm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vadc_vxm_i16m2_tu(vd, vs2, rs1, v0, vl); } -vint16m4_t test_vadc_vvm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { +vint16m4_t test_vadc_vvm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + vbool4_t v0, size_t vl) { return __riscv_vadc_vvm_i16m4_tu(vd, vs2, vs1, v0, vl); } -vint16m4_t test_vadc_vxm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { +vint16m4_t test_vadc_vxm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + vbool4_t v0, size_t vl) { return __riscv_vadc_vxm_i16m4_tu(vd, vs2, rs1, v0, vl); } -vint16m8_t test_vadc_vvm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { +vint16m8_t test_vadc_vvm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + vbool2_t v0, size_t vl) { return __riscv_vadc_vvm_i16m8_tu(vd, vs2, vs1, v0, vl); } -vint16m8_t test_vadc_vxm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { +vint16m8_t test_vadc_vxm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + vbool2_t v0, size_t vl) { return __riscv_vadc_vxm_i16m8_tu(vd, vs2, rs1, v0, vl); } -vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) { +vint32mf2_t test_vadc_vvm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, vbool64_t v0, size_t vl) { return __riscv_vadc_vvm_i32mf2_tu(vd, vs2, vs1, v0, vl); } -vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { +vint32mf2_t test_vadc_vxm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int32_t rs1, vbool64_t v0, size_t vl) { return __riscv_vadc_vxm_i32mf2_tu(vd, vs2, rs1, v0, vl); } -vint32m1_t test_vadc_vvm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { +vint32m1_t test_vadc_vvm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + vbool32_t v0, size_t vl) { return __riscv_vadc_vvm_i32m1_tu(vd, vs2, vs1, v0, vl); } -vint32m1_t test_vadc_vxm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { +vint32m1_t test_vadc_vxm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + vbool32_t v0, size_t vl) { return __riscv_vadc_vxm_i32m1_tu(vd, vs2, rs1, v0, vl); } -vint32m2_t test_vadc_vvm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { +vint32m2_t test_vadc_vvm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + vbool16_t v0, size_t vl) { return __riscv_vadc_vvm_i32m2_tu(vd, vs2, vs1, v0, vl); } -vint32m2_t test_vadc_vxm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, vbool16_t 
v0, size_t vl) { +vint32m2_t test_vadc_vxm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vadc_vxm_i32m2_tu(vd, vs2, rs1, v0, vl); } -vint32m4_t test_vadc_vvm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { +vint32m4_t test_vadc_vvm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + vbool8_t v0, size_t vl) { return __riscv_vadc_vvm_i32m4_tu(vd, vs2, vs1, v0, vl); } -vint32m4_t test_vadc_vxm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { +vint32m4_t test_vadc_vxm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vadc_vxm_i32m4_tu(vd, vs2, rs1, v0, vl); } -vint32m8_t test_vadc_vvm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { +vint32m8_t test_vadc_vvm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + vbool4_t v0, size_t vl) { return __riscv_vadc_vvm_i32m8_tu(vd, vs2, vs1, v0, vl); } -vint32m8_t test_vadc_vxm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { +vint32m8_t test_vadc_vxm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + vbool4_t v0, size_t vl) { return __riscv_vadc_vxm_i32m8_tu(vd, vs2, rs1, v0, vl); } -vint64m1_t test_vadc_vvm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { +vint64m1_t test_vadc_vvm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + vbool64_t v0, size_t vl) { return __riscv_vadc_vvm_i64m1_tu(vd, vs2, vs1, v0, vl); } -vint64m1_t test_vadc_vxm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { +vint64m1_t test_vadc_vxm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + vbool64_t v0, size_t vl) { return __riscv_vadc_vxm_i64m1_tu(vd, vs2, rs1, v0, vl); } -vint64m2_t test_vadc_vvm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { +vint64m2_t test_vadc_vvm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + vbool32_t v0, size_t vl) { return __riscv_vadc_vvm_i64m2_tu(vd, vs2, vs1, v0, vl); } -vint64m2_t test_vadc_vxm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { +vint64m2_t test_vadc_vxm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + vbool32_t v0, size_t vl) { return __riscv_vadc_vxm_i64m2_tu(vd, vs2, rs1, v0, vl); } -vint64m4_t test_vadc_vvm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { +vint64m4_t test_vadc_vvm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + vbool16_t v0, size_t vl) { return __riscv_vadc_vvm_i64m4_tu(vd, vs2, vs1, v0, vl); } -vint64m4_t test_vadc_vxm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { +vint64m4_t test_vadc_vxm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vadc_vxm_i64m4_tu(vd, vs2, rs1, v0, vl); } -vint64m8_t test_vadc_vvm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { +vint64m8_t test_vadc_vvm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + vbool8_t v0, size_t vl) { return __riscv_vadc_vvm_i64m8_tu(vd, vs2, vs1, v0, vl); } -vint64m8_t test_vadc_vxm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { +vint64m8_t test_vadc_vxm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vadc_vxm_i64m8_tu(vd, vs2, rs1, v0, vl); } -vuint8mf8_t test_vadc_vvm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t 
vs1, vbool64_t v0, size_t vl) { +vuint8mf8_t test_vadc_vvm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, vbool64_t v0, size_t vl) { return __riscv_vadc_vvm_u8mf8_tu(vd, vs2, vs1, v0, vl); } -vuint8mf8_t test_vadc_vxm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { +vuint8mf8_t test_vadc_vxm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + vbool64_t v0, size_t vl) { return __riscv_vadc_vxm_u8mf8_tu(vd, vs2, rs1, v0, vl); } -vuint8mf4_t test_vadc_vvm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { +vuint8mf4_t test_vadc_vvm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, vbool32_t v0, size_t vl) { return __riscv_vadc_vvm_u8mf4_tu(vd, vs2, vs1, v0, vl); } -vuint8mf4_t test_vadc_vxm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { +vuint8mf4_t test_vadc_vxm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + vbool32_t v0, size_t vl) { return __riscv_vadc_vxm_u8mf4_tu(vd, vs2, rs1, v0, vl); } -vuint8mf2_t test_vadc_vvm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { +vuint8mf2_t test_vadc_vvm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, vbool16_t v0, size_t vl) { return __riscv_vadc_vvm_u8mf2_tu(vd, vs2, vs1, v0, vl); } -vuint8mf2_t test_vadc_vxm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { +vuint8mf2_t test_vadc_vxm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vadc_vxm_u8mf2_tu(vd, vs2, rs1, v0, vl); } -vuint8m1_t test_vadc_vvm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { +vuint8m1_t test_vadc_vvm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + vbool8_t v0, size_t vl) { return __riscv_vadc_vvm_u8m1_tu(vd, vs2, vs1, v0, vl); } -vuint8m1_t test_vadc_vxm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { +vuint8m1_t test_vadc_vxm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vadc_vxm_u8m1_tu(vd, vs2, rs1, v0, vl); } -vuint8m2_t test_vadc_vvm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { +vuint8m2_t test_vadc_vvm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + vbool4_t v0, size_t vl) { return __riscv_vadc_vvm_u8m2_tu(vd, vs2, vs1, v0, vl); } -vuint8m2_t test_vadc_vxm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { +vuint8m2_t test_vadc_vxm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + vbool4_t v0, size_t vl) { return __riscv_vadc_vxm_u8m2_tu(vd, vs2, rs1, v0, vl); } -vuint8m4_t test_vadc_vvm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { +vuint8m4_t test_vadc_vvm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + vbool2_t v0, size_t vl) { return __riscv_vadc_vvm_u8m4_tu(vd, vs2, vs1, v0, vl); } -vuint8m4_t test_vadc_vxm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { +vuint8m4_t test_vadc_vxm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + vbool2_t v0, size_t vl) { return __riscv_vadc_vxm_u8m4_tu(vd, vs2, rs1, v0, vl); } -vuint8m8_t test_vadc_vvm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { +vuint8m8_t test_vadc_vvm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + vbool1_t v0, size_t vl) { return __riscv_vadc_vvm_u8m8_tu(vd, vs2, vs1, v0, vl); } -vuint8m8_t test_vadc_vxm_u8m8_tu(vuint8m8_t vd, 
vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { +vuint8m8_t test_vadc_vxm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + vbool1_t v0, size_t vl) { return __riscv_vadc_vxm_u8m8_tu(vd, vs2, rs1, v0, vl); } -vuint16mf4_t test_vadc_vvm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { +vuint16mf4_t test_vadc_vvm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, vbool64_t v0, + size_t vl) { return __riscv_vadc_vvm_u16mf4_tu(vd, vs2, vs1, v0, vl); } -vuint16mf4_t test_vadc_vxm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { +vuint16mf4_t test_vadc_vxm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, vbool64_t v0, size_t vl) { return __riscv_vadc_vxm_u16mf4_tu(vd, vs2, rs1, v0, vl); } -vuint16mf2_t test_vadc_vvm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { +vuint16mf2_t test_vadc_vvm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, vbool32_t v0, + size_t vl) { return __riscv_vadc_vvm_u16mf2_tu(vd, vs2, vs1, v0, vl); } -vuint16mf2_t test_vadc_vxm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { +vuint16mf2_t test_vadc_vxm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, vbool32_t v0, size_t vl) { return __riscv_vadc_vxm_u16mf2_tu(vd, vs2, rs1, v0, vl); } -vuint16m1_t test_vadc_vvm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { +vuint16m1_t test_vadc_vvm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, vbool16_t v0, size_t vl) { return __riscv_vadc_vvm_u16m1_tu(vd, vs2, vs1, v0, vl); } -vuint16m1_t test_vadc_vxm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { +vuint16m1_t test_vadc_vxm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, vbool16_t v0, size_t vl) { return __riscv_vadc_vxm_u16m1_tu(vd, vs2, rs1, v0, vl); } -vuint16m2_t test_vadc_vvm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) { +vuint16m2_t test_vadc_vvm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, vbool8_t v0, size_t vl) { return __riscv_vadc_vvm_u16m2_tu(vd, vs2, vs1, v0, vl); } -vuint16m2_t test_vadc_vxm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { +vuint16m2_t test_vadc_vxm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, vbool8_t v0, size_t vl) { return __riscv_vadc_vxm_u16m2_tu(vd, vs2, rs1, v0, vl); } -vuint16m4_t test_vadc_vvm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { +vuint16m4_t test_vadc_vvm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, vbool4_t v0, size_t vl) { return __riscv_vadc_vvm_u16m4_tu(vd, vs2, vs1, v0, vl); } -vuint16m4_t test_vadc_vxm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { +vuint16m4_t test_vadc_vxm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, vbool4_t v0, size_t vl) { return __riscv_vadc_vxm_u16m4_tu(vd, vs2, rs1, v0, vl); } -vuint16m8_t test_vadc_vvm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { +vuint16m8_t test_vadc_vvm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, vbool2_t v0, size_t vl) { return __riscv_vadc_vvm_u16m8_tu(vd, vs2, vs1, v0, vl); } -vuint16m8_t test_vadc_vxm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) { +vuint16m8_t test_vadc_vxm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + 
uint16_t rs1, vbool2_t v0, size_t vl) { return __riscv_vadc_vxm_u16m8_tu(vd, vs2, rs1, v0, vl); } -vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) { +vuint32mf2_t test_vadc_vvm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, vbool64_t v0, + size_t vl) { return __riscv_vadc_vvm_u32mf2_tu(vd, vs2, vs1, v0, vl); } -vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { +vuint32mf2_t test_vadc_vxm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, vbool64_t v0, size_t vl) { return __riscv_vadc_vxm_u32mf2_tu(vd, vs2, rs1, v0, vl); } -vuint32m1_t test_vadc_vvm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { +vuint32m1_t test_vadc_vvm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, vbool32_t v0, size_t vl) { return __riscv_vadc_vvm_u32m1_tu(vd, vs2, vs1, v0, vl); } -vuint32m1_t test_vadc_vxm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { +vuint32m1_t test_vadc_vxm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, vbool32_t v0, size_t vl) { return __riscv_vadc_vxm_u32m1_tu(vd, vs2, rs1, v0, vl); } -vuint32m2_t test_vadc_vvm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { +vuint32m2_t test_vadc_vvm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, vbool16_t v0, size_t vl) { return __riscv_vadc_vvm_u32m2_tu(vd, vs2, vs1, v0, vl); } -vuint32m2_t test_vadc_vxm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { +vuint32m2_t test_vadc_vxm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, vbool16_t v0, size_t vl) { return __riscv_vadc_vxm_u32m2_tu(vd, vs2, rs1, v0, vl); } -vuint32m4_t test_vadc_vvm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { +vuint32m4_t test_vadc_vvm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, vbool8_t v0, size_t vl) { return __riscv_vadc_vvm_u32m4_tu(vd, vs2, vs1, v0, vl); } -vuint32m4_t test_vadc_vxm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) { +vuint32m4_t test_vadc_vxm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, vbool8_t v0, size_t vl) { return __riscv_vadc_vxm_u32m4_tu(vd, vs2, rs1, v0, vl); } -vuint32m8_t test_vadc_vvm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) { +vuint32m8_t test_vadc_vvm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, vbool4_t v0, size_t vl) { return __riscv_vadc_vvm_u32m8_tu(vd, vs2, vs1, v0, vl); } -vuint32m8_t test_vadc_vxm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) { +vuint32m8_t test_vadc_vxm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, vbool4_t v0, size_t vl) { return __riscv_vadc_vxm_u32m8_tu(vd, vs2, rs1, v0, vl); } -vuint64m1_t test_vadc_vvm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { +vuint64m1_t test_vadc_vvm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, vbool64_t v0, size_t vl) { return __riscv_vadc_vvm_u64m1_tu(vd, vs2, vs1, v0, vl); } -vuint64m1_t test_vadc_vxm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { +vuint64m1_t test_vadc_vxm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, vbool64_t v0, size_t vl) { return __riscv_vadc_vxm_u64m1_tu(vd, vs2, rs1, v0, vl); } -vuint64m2_t 
test_vadc_vvm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { +vuint64m2_t test_vadc_vvm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, vbool32_t v0, size_t vl) { return __riscv_vadc_vvm_u64m2_tu(vd, vs2, vs1, v0, vl); } -vuint64m2_t test_vadc_vxm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { +vuint64m2_t test_vadc_vxm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, vbool32_t v0, size_t vl) { return __riscv_vadc_vxm_u64m2_tu(vd, vs2, rs1, v0, vl); } -vuint64m4_t test_vadc_vvm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { +vuint64m4_t test_vadc_vvm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, vbool16_t v0, size_t vl) { return __riscv_vadc_vvm_u64m4_tu(vd, vs2, vs1, v0, vl); } -vuint64m4_t test_vadc_vxm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) { +vuint64m4_t test_vadc_vxm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, vbool16_t v0, size_t vl) { return __riscv_vadc_vxm_u64m4_tu(vd, vs2, rs1, v0, vl); } -vuint64m8_t test_vadc_vvm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) { +vuint64m8_t test_vadc_vvm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, vbool8_t v0, size_t vl) { return __riscv_vadc_vvm_u64m8_tu(vd, vs2, vs1, v0, vl); } -vuint64m8_t test_vadc_vxm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) { +vuint64m8_t test_vadc_vxm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, vbool8_t v0, size_t vl) { return __riscv_vadc_vxm_u64m8_tu(vd, vs2, rs1, v0, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vadd.c b/auto-generated/policy_funcs/llvm-api-tests/vadd.c index 24042676a..69a25c0c7 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vadd.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vadd.c @@ -5,1410 +5,1810 @@ #include <riscv_vector.h> -vint8mf8_t test_vadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vadd_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vadd_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vadd_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vadd_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vadd_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vadd_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vadd_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vadd_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vadd_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vadd_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vadd_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t
vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vadd_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vadd_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vadd_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vadd_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vadd_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vadd_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vadd_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vadd_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vadd_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vadd_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vadd_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vadd_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vadd_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vadd_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vadd_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vadd_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vadd_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vadd_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vadd_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vadd_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vadd_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vadd_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vadd_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vadd_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vadd_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vadd_vx_i16m2_tu(vint16m2_t vd, 
vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vadd_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vadd_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vadd_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vadd_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vadd_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vadd_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vadd_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vadd_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vadd_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vadd_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vadd_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vadd_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vadd_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vadd_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vadd_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vadd_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vadd_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vadd_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vadd_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vadd_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vadd_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vadd_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t 
vs1, size_t vl) { +vint64m1_t test_vadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vadd_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vadd_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vadd_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vadd_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vadd_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vadd_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vadd_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vadd_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vadd_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vadd_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vadd_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vadd_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vadd_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vadd_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vadd_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vadd_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vadd_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vadd_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vadd_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vadd_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vadd_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vadd_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vadd_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vadd_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vadd_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vadd_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vadd_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vadd_vv_u8m1_tu(vd, vs2, 
vs1, vl); } -vuint8m1_t test_vadd_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vadd_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vadd_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vadd_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vadd_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vadd_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vadd_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vadd_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vadd_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vadd_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vadd_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vadd_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vadd_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vadd_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vadd_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vadd_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vadd_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vadd_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vadd_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vadd_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vadd_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vadd_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vadd_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vadd_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vadd_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vadd_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vadd_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vadd_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vadd_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vadd_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vadd_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vadd_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vadd_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vadd_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vadd_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vadd_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vadd_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vadd_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vadd_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vadd_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vadd_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t 
test_vadd_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vadd_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vadd_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vadd_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vadd_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vadd_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vadd_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vadd_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vadd_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vadd_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vadd_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vadd_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vadd_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vadd_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vadd_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vadd_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vadd_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vadd_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vadd_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vadd_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vadd_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vadd_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vadd_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vadd_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vadd_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vadd_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vadd_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vadd_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vadd_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vadd_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vadd_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vadd_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vadd_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vadd_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vadd_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vadd_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vadd_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vadd_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vadd_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vadd_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, 
uint32_t rs1, + size_t vl) { return __riscv_vadd_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vadd_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vadd_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vadd_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vadd_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vadd_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vadd_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vadd_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vadd_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vadd_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vadd_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vadd_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vadd_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vadd_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vadd_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vadd_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vadd_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vadd_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vadd_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vadd_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vadd_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vadd_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vadd_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vadd_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vadd_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vadd_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vadd_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vadd_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { 
+vint8mf2_t test_vadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vadd_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vadd_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vadd_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vadd_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vadd_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t 
test_vadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vadd_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vadd_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vadd_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vadd_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vadd_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vadd_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t 
rs1, size_t vl) { +vint32m2_t test_vadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vadd_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vadd_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vadd_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vadd_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vadd_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vadd_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vadd_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vadd_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vadd_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vadd_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, 
vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vadd_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vadd_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vadd_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vadd_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vadd_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vadd_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vadd_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vadd_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vadd_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vadd_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vadd_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vadd_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vadd_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vadd_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vadd_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vadd_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vadd_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vadd_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vadd_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vadd_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vadd_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vadd_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vadd_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vadd_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vadd_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vadd_vx_u8m8_tum(vbool1_t vm, 
vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vadd_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vadd_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vadd_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vadd_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vadd_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vadd_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vadd_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vadd_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vadd_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vadd_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vadd_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vadd_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vadd_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vadd_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vadd_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vadd_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vadd_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vadd_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vadd_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vadd_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vadd_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vadd_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vadd_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vadd_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vadd_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vadd_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vadd_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vadd_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vadd_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vadd_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vadd_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vadd_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, 
vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vadd_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vadd_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vadd_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vadd_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vadd_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vadd_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vadd_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vadd_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vadd_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vadd_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vadd_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vadd_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vadd_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vadd_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vadd_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vadd_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vadd_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vadd_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vadd_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vadd_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vadd_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vadd_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vadd_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vadd_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vadd_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vadd_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vadd_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vadd_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vadd_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vadd_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vadd_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t 
test_vadd_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vadd_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vadd_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vadd_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vadd_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vadd_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vadd_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vadd_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vadd_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vadd_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vadd_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vadd_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vadd_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vadd_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vadd_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vadd_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vadd_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vadd_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vadd_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vadd_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vadd_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) 
{ return __riscv_vadd_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vadd_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vadd_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vadd_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vadd_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vadd_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, 
size_t vl) { return __riscv_vadd_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vadd_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vadd_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vadd_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vadd_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vadd_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t 
test_vadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vadd_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vadd_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vadd_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vadd_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vadd_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vadd_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vadd_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vadd_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vadd_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vadd_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vadd_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vadd_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8_tumu(vbool64_t vm, 
vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vadd_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vadd_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vadd_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vadd_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vadd_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vadd_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vadd_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vadd_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vadd_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vadd_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vadd_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vadd_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vadd_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vadd_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vadd_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vadd_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vadd_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vadd_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vadd_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vadd_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vadd_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vadd_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vadd_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vadd_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vadd_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8m8_tumu(vm, vd, vs2, rs1, 
vl); } -vuint16mf4_t test_vadd_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vadd_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vadd_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vadd_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vadd_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vadd_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vadd_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vadd_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vadd_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vadd_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vadd_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vadd_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vadd_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vadd_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vadd_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vadd_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vadd_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vadd_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vadd_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vadd_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vadd_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vadd_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vadd_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vadd_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vadd_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vadd_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vadd_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vadd_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vadd_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vadd_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vadd_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vadd_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, 
vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vadd_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vadd_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vadd_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vadd_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vadd_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vadd_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vadd_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vadd_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vadd_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vadd_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vadd_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vadd_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vadd_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vadd_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vadd_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vadd_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vadd_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vadd_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vadd_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vadd_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vadd_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vadd_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vadd_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vadd_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vadd_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vadd_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vadd_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vadd_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vadd_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vadd_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vadd_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vadd_vv_u64m2_tumu(vbool32_t vm, 
vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vadd_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vadd_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vadd_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vadd_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vadd_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vadd_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vadd_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vadd_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vadd_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vadd_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vadd_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vadd_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vadd_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vadd_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vadd_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vadd_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vadd_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vadd_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vadd_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vadd_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t 
test_vadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vadd_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vadd_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vadd_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vadd_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vadd_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vadd_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, 
vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vadd_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vadd_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vadd_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vadd_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vadd_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vadd_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vadd_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t 
vs2, + int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vadd_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vadd_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vadd_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vadd_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vadd_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vadd_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vadd_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vadd_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vadd_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vadd_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vadd_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vadd_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vadd_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vadd_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vadd_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vadd_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vadd_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, 
+ vuint8mf4_t vs1, size_t vl) { return __riscv_vadd_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vadd_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vadd_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vadd_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vadd_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vadd_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vadd_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vadd_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vadd_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vadd_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vadd_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vadd_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vadd_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vadd_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vadd_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vadd_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vadd_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vadd_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vadd_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vadd_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vadd_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vadd_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vadd_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vadd_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vadd_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vadd_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vadd_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vadd_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vadd_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vadd_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vadd_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vadd_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vadd_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vadd_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, 
uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vadd_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vadd_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vadd_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vadd_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vadd_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vadd_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vadd_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vadd_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vadd_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vadd_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vadd_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vadd_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vadd_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vadd_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vadd_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vadd_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vadd_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vadd_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vadd_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vadd_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vadd_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vadd_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vadd_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vadd_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vadd_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vadd_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vadd_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vadd_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vadd_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vadd_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vadd_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t 
vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vadd_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vadd_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vadd_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vadd_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vadd_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vadd_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vadd_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vadd_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vadd_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vadd_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vadd_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vadd_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vadd_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vadd_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vadd_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vadd_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vadd_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vadd_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vadd_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vadd_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vadd_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vadd_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vadd_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vadd_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vadd_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vadd_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vadd_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vadd_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vadd_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vadd_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vadd_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vadd_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vadd_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vadd_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vadd_vv_u64m4_mu(vm, vd, vs2, vs1, 
vl); } -vuint64m4_t test_vadd_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vadd_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vadd_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vadd_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vadd_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vadd_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vadd_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vadd_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vadd_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vand.c b/auto-generated/policy_funcs/llvm-api-tests/vand.c index 97cfcb672..ba442c905 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vand.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vand.c @@ -5,1410 +5,1810 @@ #include <riscv_vector.h> -vint8mf8_t test_vand_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vand_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vand_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vand_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vand_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vand_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vand_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vand_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vand_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vand_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vand_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vand_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vand_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vand_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vand_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vand_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vand_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vand_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vand_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vand_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vand_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vand_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vand_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vand_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vand_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vand_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vand_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vand_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vand_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vand_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vand_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t
vs1, size_t vl) { +vint8m4_t test_vand_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vand_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vand_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vand_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vand_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vand_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vand_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vand_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vand_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vand_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vand_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vand_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vand_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vand_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vand_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vand_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vand_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vand_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vand_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vand_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vand_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vand_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vand_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vand_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vand_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vand_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vand_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vand_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vand_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vand_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vand_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vand_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vand_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vand_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vand_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vand_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vand_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vand_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vand_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t 
test_vand_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vand_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vand_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vand_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vand_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vand_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vand_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vand_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vand_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vand_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vand_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vand_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vand_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vand_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vand_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vand_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vand_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vand_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vand_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vand_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vand_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vand_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vand_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vand_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vand_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vand_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vand_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vand_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vand_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vand_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vand_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vand_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vand_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vand_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { 
return __riscv_vand_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vand_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vand_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vand_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vand_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vand_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vand_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vand_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vand_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vand_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vand_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vand_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vand_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vand_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vand_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vand_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vand_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vand_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vand_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vand_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vand_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vand_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vand_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vand_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vand_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vand_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vand_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vand_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vand_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vand_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vand_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vand_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vand_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vand_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vand_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vand_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vand_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vand_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vand_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vand_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vand_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vand_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vand_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vand_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t 
test_vand_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vand_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vand_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vand_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vand_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vand_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vand_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vand_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vand_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vand_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vand_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vand_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vand_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vand_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vand_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vand_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vand_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vand_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vand_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vand_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vand_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vand_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vand_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vand_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vand_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vand_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vand_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vand_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vand_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vand_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vand_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vand_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vand_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vand_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vand_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vand_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vand_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vand_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vand_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vand_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vand_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t 
vl) { return __riscv_vand_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vand_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vand_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vand_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vand_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vand_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vand_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vand_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vand_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vand_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vand_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vand_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vand_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vand_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vand_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vand_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vand_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vand_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vand_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vand_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vand_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vand_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vand_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vand_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vand_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vand_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vand_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vand_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vand_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vand_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vand_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vand_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vand_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vand_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vand_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vand_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vand_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vand_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vand_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vand_vv_u64m2_tu(vd, vs2, vs1, vl); } 
-vuint64m2_t test_vand_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vand_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vand_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vand_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vand_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vand_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vand_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vand_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vand_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vand_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vand_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vand_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vand_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vand_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vand_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vand_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vand_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vand_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vand_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vand_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vand_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vand_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vand_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vand_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vand_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vand_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vand_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vand_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vand_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vand_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vand_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vand_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vand_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vand_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vand_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vand_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vand_vv_i8m2_tum(vbool4_t vm, 
vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vand_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vand_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vand_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vand_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vand_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vand_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vand_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vand_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vand_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vand_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vand_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vand_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vand_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vand_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vand_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vand_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vand_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vand_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vand_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vand_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vand_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vand_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vand_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vand_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vand_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vand_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vand_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vand_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vand_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vand_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vand_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vand_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vand_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vand_vx_i16m2_tum(vbool8_t vm, 
vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vand_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vand_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vand_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vand_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vand_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vand_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vand_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vand_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vand_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vand_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vand_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vand_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vand_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vand_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vand_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vand_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vand_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vand_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vand_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vand_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vand_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vand_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vand_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vand_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vand_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vand_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vand_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t 
test_vand_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vand_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vand_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vand_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vand_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vand_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vand_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vand_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vand_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vand_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vand_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vand_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vand_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vand_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vand_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vand_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vand_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vand_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vand_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vand_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vand_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vand_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vand_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vand_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vand_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vand_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vand_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vand_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vand_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vand_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vand_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vand_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vand_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vand_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vand_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vand_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t 
vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vand_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vand_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vand_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vand_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vand_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vand_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vand_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vand_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vand_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vand_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vand_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vand_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vand_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vand_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vand_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vand_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vand_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vand_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vand_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vand_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vand_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vand_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vand_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vand_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vand_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vand_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vand_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vand_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vand_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vand_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vand_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vand_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t 
test_vand_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vand_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vand_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vand_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vand_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vand_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vand_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vand_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vand_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vand_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vand_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vand_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vand_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vand_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vand_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vand_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vand_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vand_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vand_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vand_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vand_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vand_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vand_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vand_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vand_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vand_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vand_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vand_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vand_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vand_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vand_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vand_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vand_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t 
test_vand_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vand_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vand_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vand_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vand_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vand_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vand_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vand_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vand_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vand_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vand_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vand_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vand_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vand_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vand_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vand_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vand_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vand_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vand_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vand_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vand_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vand_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vand_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vand_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vand_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vand_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vand_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vand_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vand_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vand_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vand_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vand_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vand_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vand_vv_u64m4_tum(vm, vd, vs2, vs1, 
vl); } -vuint64m4_t test_vand_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vand_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vand_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vand_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vand_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vand_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vand_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vand_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vand_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vand_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vand_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vand_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vand_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vand_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vand_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vand_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vand_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vand_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vand_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vand_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vand_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vand_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vand_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vand_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vand_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vand_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vand_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vand_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vand_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vand_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vand_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vand_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vand_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vand_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8m2_tumu(vm, vd, 
vs2, rs1, vl); } -vint8m4_t test_vand_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vand_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vand_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vand_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vand_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vand_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vand_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vand_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vand_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vand_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vand_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vand_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vand_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vand_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vand_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vand_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vand_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vand_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vand_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vand_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vand_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vand_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vand_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vand_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vand_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vand_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vand_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vand_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vand_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vand_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vand_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vand_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vand_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t 
vl) { return __riscv_vand_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vand_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vand_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vand_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vand_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vand_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vand_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vand_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vand_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vand_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vand_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vand_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vand_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vand_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vand_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vand_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vand_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vand_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vand_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vand_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vand_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vand_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vand_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vand_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vand_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vand_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vand_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t 
test_vand_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vand_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vand_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vand_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vand_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vand_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vand_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vand_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vand_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vand_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vand_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vand_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vand_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vand_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vand_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vand_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vand_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vand_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vand_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vand_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vand_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vand_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vand_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vand_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vand_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vand_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vand_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vand_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vand_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vand_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vand_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vand_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vand_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vand_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vand_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t 
test_vand_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vand_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vand_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vand_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vand_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vand_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vand_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vand_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vand_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vand_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vand_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vand_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vand_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vand_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vand_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vand_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vand_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vand_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vand_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vand_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vand_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vand_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vand_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vand_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vand_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vand_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vand_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vand_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vand_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vand_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vand_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vand_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vand_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, 
vuint16mf2_t vs1, + size_t vl) { return __riscv_vand_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vand_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vand_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vand_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vand_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vand_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vand_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vand_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vand_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vand_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vand_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vand_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vand_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vand_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vand_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vand_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vand_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vand_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vand_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vand_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vand_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vand_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vand_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vand_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vand_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vand_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vand_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vand_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vand_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vand_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vand_vv_u32m1_tumu(vm, vd, vs2, vs1, 
vl); } -vuint32m1_t test_vand_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vand_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vand_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vand_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vand_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vand_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vand_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vand_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vand_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vand_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vand_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vand_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vand_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vand_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vand_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vand_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vand_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vand_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vand_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vand_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vand_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vand_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vand_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vand_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vand_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vand_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vand_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vand_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vand_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vand_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vand_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vand_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vand_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { 
+vuint64m4_t test_vand_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vand_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vand_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vand_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vand_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vand_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vand_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vand_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vand_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vand_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vand_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vand_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vand_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vand_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vand_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vand_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vand_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vand_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vand_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vand_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vand_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vand_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vand_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vand_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vand_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vand_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vand_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vand_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vand_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vand_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vand_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vand_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vand_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vand_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vand_vv_i8m4_mu(vbool2_t vm, 
vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vand_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vand_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vand_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vand_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vand_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vand_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vand_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vand_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vand_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vand_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vand_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vand_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vand_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vand_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vand_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vand_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vand_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vand_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vand_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vand_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vand_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vand_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vand_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vand_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vand_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vand_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vand_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vand_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vand_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vand_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vand_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vand_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vand_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vand_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vand_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vand_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vand_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vand_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t 
vs2, + int16_t rs1, size_t vl) { return __riscv_vand_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vand_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vand_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vand_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vand_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vand_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vand_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vand_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vand_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vand_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vand_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vand_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vand_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vand_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vand_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vand_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vand_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vand_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vand_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vand_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vand_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vand_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vand_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vand_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vand_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vand_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vand_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vand_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vand_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vand_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vand_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vand_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vand_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vand_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vand_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vand_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vand_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vand_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vand_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + 
vint64m1_t vs1, size_t vl) { return __riscv_vand_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vand_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vand_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vand_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vand_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vand_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vand_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vand_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vand_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vand_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vand_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vand_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vand_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vand_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vand_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vand_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vand_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vand_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vand_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vand_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vand_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vand_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vand_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vand_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vand_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vand_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vand_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vand_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vand_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vand_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vand_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vand_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vand_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vand_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vand_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vand_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vand_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, 
vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vand_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vand_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vand_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vand_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vand_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vand_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vand_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vand_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vand_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vand_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vand_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vand_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vand_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vand_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vand_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vand_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vand_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vand_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vand_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vand_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vand_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vand_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vand_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vand_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vand_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vand_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vand_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vand_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vand_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vand_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vand_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vand_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t 
test_vand_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vand_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vand_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vand_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vand_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vand_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vand_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vand_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vand_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vand_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vand_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vand_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vand_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vand_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vand_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vand_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vand_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vand_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vand_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vand_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vand_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vand_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vand_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vand_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vand_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vand_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vand_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vand_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vand_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vand_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vand_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vand_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vand_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t 
test_vand_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vand_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vand_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vand_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vand_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vand_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vand_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vand_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vand_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vand_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vand_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vand_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vand_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vand_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vand_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vand_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vand_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vand_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vand_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vand_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vand_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vand_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vand_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vand_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vand_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vand_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vand_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vand_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vand_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vand_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vand_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vand_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vand_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vand_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vand_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vand_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { 
return __riscv_vand_vx_u64m8_mu(vm, vd, vs2, rs1, vl); }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vasub.c b/auto-generated/policy_funcs/llvm-api-tests/vasub.c
index 6c53199b5..d2f1cc0b9 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vasub.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vasub.c
@@ -5,706 +5,891 @@
 #include <riscv_vector.h>
-vint8mf8_t test_vasub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vasub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vasub_vv_i8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vasub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vasub_vx_i8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vasub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vasub_vv_i8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vasub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vasub_vx_i8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vasub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vasub_vv_i8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vasub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vasub_vx_i8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vasub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vasub_vv_i8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vasub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vasub_vx_i8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vasub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vasub_vv_i8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vasub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vasub_vx_i8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vasub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vasub_vv_i8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vasub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vasub_vx_i8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vv_i8m8_tu(vint8m8_t vd,
vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vasub_vv_i8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vasub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vasub_vx_i8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vasub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vasub_vv_i16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vasub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vasub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vasub_vv_i16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vasub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vasub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vasub_vv_i16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vasub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vasub_vx_i16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vasub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vasub_vv_i16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vasub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vasub_vx_i16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vasub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vasub_vv_i16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vasub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vasub_vx_i16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vasub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vasub_vv_i16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vasub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vasub_vx_i16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t 
test_vasub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vasub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vasub_vv_i32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vasub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vasub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vasub_vv_i32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vasub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vasub_vx_i32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vasub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vasub_vv_i32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vasub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vasub_vx_i32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vasub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vasub_vv_i32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vasub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vasub_vx_i32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vasub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vasub_vv_i32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vasub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vasub_vx_i32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vasub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vasub_vv_i64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vasub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vasub_vx_i64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vasub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vasub_vv_i64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vasub_vx_i64m2_tu(vint64m2_t vd, 
vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vasub_vx_i64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vasub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vasub_vv_i64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vasub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vasub_vx_i64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vasub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vasub_vv_i64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vasub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vasub_vx_i64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vasub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vasub_vv_i8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vasub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vasub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vasub_vv_i8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vasub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vasub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vasub_vv_i8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vasub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vasub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vasub_vv_i8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vasub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vv_i8m2_tum(vbool4_t vm, vint8m2_t 
vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vasub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vasub_vv_i8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vasub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vasub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vasub_vv_i8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vasub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vasub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vasub_vv_i8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vasub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vasub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vasub_vv_i16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vasub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vasub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vasub_vv_i16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vasub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vasub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vasub_vv_i16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vasub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t 
vs1, size_t vl) { +vint16m2_t test_vasub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vasub_vv_i16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vasub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vasub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vasub_vv_i16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vasub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vasub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vasub_vv_i16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vasub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vasub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vasub_vv_i32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vasub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vasub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vasub_vv_i32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vasub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vasub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vasub_vv_i32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vasub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, 
vint32m4_t vs1, size_t vl) { +vint32m4_t test_vasub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vasub_vv_i32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vasub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vasub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vasub_vv_i32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vasub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vasub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vasub_vv_i64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vasub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vasub_vx_i64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vasub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vasub_vv_i64m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vasub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vasub_vx_i64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vasub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vasub_vv_i64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vasub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vasub_vx_i64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vasub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vasub_vv_i64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vasub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vasub_vx_i64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t 
vs1, size_t vl) { +vint8mf8_t test_vasub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vasub_vv_i8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vasub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vasub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vasub_vv_i8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vasub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vasub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vasub_vv_i8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vasub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vasub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vasub_vv_i8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vasub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vasub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vasub_vv_i8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vasub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vasub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vasub_vv_i8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vasub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t 
test_vasub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vasub_vv_i8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vasub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vasub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vasub_vv_i16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vasub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vasub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vasub_vv_i16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vasub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vasub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vasub_vv_i16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vasub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vasub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vasub_vv_i16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vasub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vasub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vasub_vv_i16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vasub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, 
vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vasub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vasub_vv_i16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vasub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vasub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vasub_vv_i32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vasub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vasub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vasub_vv_i32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vasub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vasub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vasub_vv_i32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vasub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vasub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vasub_vv_i32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vasub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vasub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vasub_vv_i32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vasub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t 
test_vasub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vasub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vasub_vv_i64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vasub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vasub_vx_i64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vasub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vasub_vv_i64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vasub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vasub_vx_i64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vasub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vasub_vv_i64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vasub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vasub_vx_i64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vasub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vasub_vv_i64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vasub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vasub_vx_i64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vasub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vasub_vv_i8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vasub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vasub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vasub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vasub_vv_i8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vasub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vasub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } 
-vint8mf2_t test_vasub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vasub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vasub_vv_i8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vasub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vasub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vasub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vasub_vv_i8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vasub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vasub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vasub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vasub_vv_i8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vasub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vasub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vasub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vasub_vv_i8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vasub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vasub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vasub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vasub_vv_i8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vasub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vasub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vasub_vx_i8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vasub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vasub_vv_i16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vasub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vasub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { 
+vint16mf2_t test_vasub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vasub_vv_i16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vasub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vasub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vasub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vasub_vv_i16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vasub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vasub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vasub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vasub_vv_i16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vasub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vasub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vasub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vasub_vv_i16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vasub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vasub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vasub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vasub_vv_i16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vasub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vasub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vasub_vx_i16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vasub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vasub_vv_i32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vasub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vasub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t 
test_vasub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vasub_vv_i32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vasub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vasub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vasub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vasub_vv_i32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vasub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vasub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vasub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vasub_vv_i32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vasub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vasub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vasub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vasub_vv_i32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vasub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vasub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vasub_vx_i32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vasub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vasub_vv_i64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vasub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vasub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vasub_vx_i64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vasub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vasub_vv_i64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vasub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vasub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vasub_vx_i64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vasub_vv_i64m4_mu(vbool16_t vm, 
vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vasub_vv_i64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vasub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vasub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vasub_vx_i64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vasub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vasub_vv_i64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vasub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vasub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vasub_vx_i64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vasubu.c b/auto-generated/policy_funcs/llvm-api-tests/vasubu.c index 6b0a6b6a7..0e49abf02 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vasubu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vasubu.c @@ -5,706 +5,957 @@ #include <riscv_vector.h> -vuint8mf8_t test_vasubu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vasubu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vasubu_vv_u8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vasubu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vasubu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vasubu_vv_u8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vasubu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vasubu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vasubu_vv_u8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vasubu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vasubu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vasubu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vasubu_vx_u8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+vuint8m2_t test_vasubu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vasubu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vasubu_vx_u8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vasubu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vasubu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vasubu_vx_u8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vasubu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vasubu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vasubu_vx_u8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vasubu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vasubu_vv_u16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vasubu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vasubu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vasubu_vv_u16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vasubu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vasubu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vasubu_vv_u16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vasubu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vasubu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vasubu_vv_u16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t 
test_vasubu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vasubu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vasubu_vv_u16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vasubu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vasubu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vasubu_vv_u16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vasubu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vasubu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vasubu_vv_u32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vasubu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vasubu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vasubu_vv_u32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vasubu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vasubu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vasubu_vv_u32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vasubu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vasubu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vasubu_vv_u32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vasubu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { 
+vuint32m8_t test_vasubu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vasubu_vv_u32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vasubu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vasubu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vasubu_vv_u64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vasubu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vasubu_vx_u64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vasubu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vasubu_vv_u64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vasubu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vasubu_vx_u64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vasubu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vasubu_vv_u64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vasubu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vasubu_vx_u64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vasubu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vasubu_vv_u64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vasubu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vasubu_vx_u64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vasubu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vasubu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vasubu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } 
-vuint8mf4_t test_vasubu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vasubu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vasubu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vasubu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vasubu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vasubu_vv_u8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vasubu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vasubu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vasubu_vv_u8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vasubu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vasubu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vasubu_vv_u8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vasubu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vasubu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vasubu_vv_u8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vasubu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vasubu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16mf4_tum(vm, 
vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vasubu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vasubu_vx_u16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vasubu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vasubu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vasubu_vx_u16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vasubu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vasubu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vasubu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vasubu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vasubu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vasubu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vasubu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vasubu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, 
size_t vl) { +vuint32mf2_t test_vasubu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vasubu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vasubu_vx_u32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vasubu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vasubu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vasubu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vasubu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vasubu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vasubu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vasubu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vasubu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vasubu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vasubu_vv_u64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vasubu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return 
__riscv_vasubu_vx_u64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vasubu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u64m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vasubu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vasubu_vx_u64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vasubu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vasubu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vasubu_vx_u64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vasubu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vasubu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vasubu_vx_u64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vasubu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vasubu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vasubu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vasubu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vasubu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t 
vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vasubu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vasubu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vasubu_vv_u8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vasubu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vasubu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vasubu_vv_u8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vasubu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vasubu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vasubu_vv_u8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vasubu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vasubu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vasubu_vv_u8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vasubu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vasubu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vasubu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vasubu_vx_u16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vasubu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16mf2_tumu(vm, vd, vs2, vs1, 
__RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vasubu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vasubu_vx_u16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vasubu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vasubu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vasubu_vx_u16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vasubu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vasubu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vasubu_vx_u16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vasubu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vasubu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vasubu_vx_u16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vasubu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vasubu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vasubu_vx_u16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vasubu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vasubu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vasubu_vx_u32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t 
vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vasubu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vasubu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vasubu_vx_u32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vasubu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vasubu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vasubu_vx_u32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vasubu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vasubu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vasubu_vx_u32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vasubu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vasubu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vasubu_vx_u32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vasubu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vasubu_vv_u64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vasubu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vasubu_vx_u64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vasubu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vasubu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, 
uint64_t rs1, + size_t vl) { return __riscv_vasubu_vx_u64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vasubu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vasubu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vasubu_vx_u64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vasubu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vasubu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vasubu_vx_u64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vasubu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vasubu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vasubu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vasubu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vasubu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vasubu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vasubu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vasubu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vasubu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vasubu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vasubu_vv_u8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vasubu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, 
uint8_t rs1, size_t vl) { +vuint8m1_t test_vasubu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vasubu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vasubu_vv_u8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vasubu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vasubu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vasubu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vasubu_vv_u8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vasubu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vasubu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vasubu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vasubu_vv_u8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vasubu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vasubu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vasubu_vx_u8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vasubu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vasubu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vasubu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vasubu_vx_u16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vasubu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vasubu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vasubu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vasubu_vx_u16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vasubu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vasubu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t 
test_vasubu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vasubu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vasubu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vasubu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vasubu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vasubu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vasubu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vasubu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vasubu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vasubu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vasubu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vasubu_vx_u16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vasubu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vasubu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vasubu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vasubu_vx_u32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vasubu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vasubu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vasubu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vasubu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, 
vuint32m2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vasubu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vasubu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vasubu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vasubu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vasubu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vasubu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vasubu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vasubu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vasubu_vx_u32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vasubu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vasubu_vv_u64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vasubu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vasubu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vasubu_vx_u64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vasubu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vasubu_vv_u64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vasubu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vasubu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vasubu_vx_u64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vasubu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vasubu_vv_u64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vasubu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vasubu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vasubu_vx_u64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, 
size_t vl) { +vuint64m8_t test_vasubu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vasubu_vv_u64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vasubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vasubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vasubu_vx_u64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vcompress.c b/auto-generated/policy_funcs/llvm-api-tests/vcompress.c index e14737c19..ebdee1337 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vcompress.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vcompress.c @@ -1,243 +1,302 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vbool64_t vs1, size_t vl) { +vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vbool64_t vs1, size_t vl) { return __riscv_vcompress_vm_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vbool32_t vs1, size_t vl) { +vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vbool32_t vs1, size_t vl) { return __riscv_vcompress_vm_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vbool16_t vs1, size_t vl) { +vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vbool16_t vs1, size_t vl) { return __riscv_vcompress_vm_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vbool8_t vs1, size_t vl) { +vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vbool8_t vs1, size_t vl) { return __riscv_vcompress_vm_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vbool4_t vs1, size_t vl) { +vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vbool4_t vs1, size_t vl) { return __riscv_vcompress_vm_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vbool2_t vs1, size_t vl) { +vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vbool2_t vs1, size_t vl) { return __riscv_vcompress_vm_f16m8_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vbool64_t vs1, size_t vl) { +vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vbool64_t vs1, size_t vl) { return __riscv_vcompress_vm_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vbool32_t vs1, size_t vl) { +vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vbool32_t vs1, size_t vl) { return __riscv_vcompress_vm_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vbool16_t vs1, size_t vl) { +vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vbool16_t vs1, size_t vl) { return
__riscv_vcompress_vm_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vbool8_t vs1, size_t vl) { +vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vbool8_t vs1, size_t vl) { return __riscv_vcompress_vm_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vbool4_t vs1, size_t vl) { +vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vbool4_t vs1, size_t vl) { return __riscv_vcompress_vm_f32m8_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vbool64_t vs1, size_t vl) { +vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vbool64_t vs1, size_t vl) { return __riscv_vcompress_vm_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vbool32_t vs1, size_t vl) { +vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vbool32_t vs1, size_t vl) { return __riscv_vcompress_vm_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vbool16_t vs1, size_t vl) { +vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vbool16_t vs1, size_t vl) { return __riscv_vcompress_vm_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vbool8_t vs1, size_t vl) { +vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vbool8_t vs1, size_t vl) { return __riscv_vcompress_vm_f64m8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vbool64_t vs1, size_t vl) { +vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, + vbool64_t vs1, size_t vl) { return __riscv_vcompress_vm_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vbool32_t vs1, size_t vl) { +vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, + vbool32_t vs1, size_t vl) { return __riscv_vcompress_vm_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vbool16_t vs1, size_t vl) { +vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, + vbool16_t vs1, size_t vl) { return __riscv_vcompress_vm_i8mf2_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vbool8_t vs1, size_t vl) { +vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vbool8_t vs1, + size_t vl) { return __riscv_vcompress_vm_i8m1_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vbool4_t vs1, size_t vl) { +vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vbool4_t vs1, + size_t vl) { return __riscv_vcompress_vm_i8m2_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vbool2_t vs1, size_t vl) { +vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vbool2_t vs1, + size_t vl) { return __riscv_vcompress_vm_i8m4_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vbool1_t vs1, size_t vl) { +vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vbool1_t vs1, + size_t vl) { return __riscv_vcompress_vm_i8m8_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vbool64_t vs1, size_t vl) { +vint16mf4_t 
test_vcompress_vm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vbool64_t vs1, size_t vl) { return __riscv_vcompress_vm_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vbool32_t vs1, size_t vl) { +vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vbool32_t vs1, size_t vl) { return __riscv_vcompress_vm_i16mf2_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vbool16_t vs1, size_t vl) { +vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + vbool16_t vs1, size_t vl) { return __riscv_vcompress_vm_i16m1_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vbool8_t vs1, size_t vl) { +vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, + vbool8_t vs1, size_t vl) { return __riscv_vcompress_vm_i16m2_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vbool4_t vs1, size_t vl) { +vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, + vbool4_t vs1, size_t vl) { return __riscv_vcompress_vm_i16m4_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vbool2_t vs1, size_t vl) { +vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, + vbool2_t vs1, size_t vl) { return __riscv_vcompress_vm_i16m8_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vbool64_t vs1, size_t vl) { +vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vbool64_t vs1, size_t vl) { return __riscv_vcompress_vm_i32mf2_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vbool32_t vs1, size_t vl) { +vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vbool32_t vs1, size_t vl) { return __riscv_vcompress_vm_i32m1_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vbool16_t vs1, size_t vl) { +vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, + vbool16_t vs1, size_t vl) { return __riscv_vcompress_vm_i32m2_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vbool8_t vs1, size_t vl) { +vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, + vbool8_t vs1, size_t vl) { return __riscv_vcompress_vm_i32m4_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vbool4_t vs1, size_t vl) { +vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, + vbool4_t vs1, size_t vl) { return __riscv_vcompress_vm_i32m8_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vbool64_t vs1, size_t vl) { +vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vbool64_t vs1, size_t vl) { return __riscv_vcompress_vm_i64m1_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vbool32_t vs1, size_t vl) { +vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, + vbool32_t vs1, size_t vl) { return __riscv_vcompress_vm_i64m2_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vbool16_t vs1, size_t vl) { +vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, + vbool16_t vs1, size_t vl) { return __riscv_vcompress_vm_i64m4_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vcompress_vm_i64m8_tu(vint64m8_t vd, vint64m8_t 
vs2, vbool8_t vs1, size_t vl) { +vint64m8_t test_vcompress_vm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, + vbool8_t vs1, size_t vl) { return __riscv_vcompress_vm_i64m8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vbool64_t vs1, size_t vl) { +vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vbool64_t vs1, size_t vl) { return __riscv_vcompress_vm_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vbool32_t vs1, size_t vl) { +vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vbool32_t vs1, size_t vl) { return __riscv_vcompress_vm_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vbool16_t vs1, size_t vl) { +vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vbool16_t vs1, size_t vl) { return __riscv_vcompress_vm_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vbool8_t vs1, size_t vl) { +vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vbool8_t vs1, size_t vl) { return __riscv_vcompress_vm_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vbool4_t vs1, size_t vl) { +vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, + vbool4_t vs1, size_t vl) { return __riscv_vcompress_vm_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vbool2_t vs1, size_t vl) { +vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, + vbool2_t vs1, size_t vl) { return __riscv_vcompress_vm_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vbool1_t vs1, size_t vl) { +vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, + vbool1_t vs1, size_t vl) { return __riscv_vcompress_vm_u8m8_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vbool64_t vs1, size_t vl) { +vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vbool64_t vs1, size_t vl) { return __riscv_vcompress_vm_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vbool32_t vs1, size_t vl) { +vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vbool32_t vs1, size_t vl) { return __riscv_vcompress_vm_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vbool16_t vs1, size_t vl) { +vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vbool16_t vs1, size_t vl) { return __riscv_vcompress_vm_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vbool8_t vs1, size_t vl) { +vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vbool8_t vs1, size_t vl) { return __riscv_vcompress_vm_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vbool4_t vs1, size_t vl) { +vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vbool4_t vs1, size_t vl) { return __riscv_vcompress_vm_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vbool2_t vs1, size_t vl) { +vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vbool2_t vs1, size_t vl) { return __riscv_vcompress_vm_u16m8_tu(vd, vs2, 
vs1, vl); } -vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vbool64_t vs1, size_t vl) { +vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vbool64_t vs1, size_t vl) { return __riscv_vcompress_vm_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vbool32_t vs1, size_t vl) { +vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vbool32_t vs1, size_t vl) { return __riscv_vcompress_vm_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vbool16_t vs1, size_t vl) { +vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vbool16_t vs1, size_t vl) { return __riscv_vcompress_vm_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vbool8_t vs1, size_t vl) { +vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vbool8_t vs1, size_t vl) { return __riscv_vcompress_vm_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vbool4_t vs1, size_t vl) { +vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vbool4_t vs1, size_t vl) { return __riscv_vcompress_vm_u32m8_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vbool64_t vs1, size_t vl) { +vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vbool64_t vs1, size_t vl) { return __riscv_vcompress_vm_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vbool32_t vs1, size_t vl) { +vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vbool32_t vs1, size_t vl) { return __riscv_vcompress_vm_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vbool16_t vs1, size_t vl) { +vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vbool16_t vs1, size_t vl) { return __riscv_vcompress_vm_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vbool8_t vs1, size_t vl) { +vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vbool8_t vs1, size_t vl) { return __riscv_vcompress_vm_u64m8_tu(vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vdiv.c b/auto-generated/policy_funcs/llvm-api-tests/vdiv.c index ee545c0dc..ba85efa6e 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vdiv.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vdiv.c @@ -5,706 +5,891 @@ #include <riscv_vector.h> -vint8mf8_t test_vdiv_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vdiv_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vdiv_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vdiv_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vdiv_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vdiv_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vdiv_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vdiv_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vdiv_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vdiv_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vdiv_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, +
size_t vl) { return __riscv_vdiv_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vdiv_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vdiv_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vdiv_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vdiv_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vdiv_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vdiv_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vdiv_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vdiv_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vdiv_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vdiv_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vdiv_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vdiv_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vdiv_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vdiv_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vdiv_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vdiv_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vdiv_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vdiv_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vdiv_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vdiv_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vdiv_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vdiv_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vdiv_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vdiv_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vdiv_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vdiv_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vdiv_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vdiv_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vdiv_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vdiv_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vdiv_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vdiv_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vdiv_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vdiv_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vdiv_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vdiv_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vdiv_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vdiv_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vdiv_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + 
size_t vl) { return __riscv_vdiv_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vdiv_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vdiv_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vdiv_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vdiv_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vdiv_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vdiv_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vdiv_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vdiv_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vdiv_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vdiv_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vdiv_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vdiv_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vdiv_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vdiv_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vdiv_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vdiv_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vdiv_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vdiv_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vdiv_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vdiv_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vdiv_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vdiv_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vdiv_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vdiv_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vdiv_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vdiv_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vdiv_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vdiv_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vdiv_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vdiv_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vdiv_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vdiv_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vdiv_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vdiv_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vdiv_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vdiv_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vdiv_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vdiv_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vdiv_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t 
test_vdiv_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vdiv_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vdiv_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vdiv_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vdiv_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vdiv_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vdiv_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vdiv_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vdiv_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vdiv_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vdiv_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vdiv_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vdiv_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vdiv_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vdiv_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vdiv_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vdiv_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vdiv_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vdiv_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vdiv_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vdiv_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vdiv_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vdiv_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vdiv_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vdiv_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vdiv_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vdiv_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vdiv_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vdiv_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vdiv_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vdiv_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vdiv_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vdiv_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vdiv_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vdiv_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vdiv_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vdiv_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vdiv_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vdiv_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vdiv_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vdiv_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vdiv_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t 
rs1, size_t vl) { return __riscv_vdiv_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vdiv_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vdiv_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vdiv_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vdiv_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vdiv_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vdiv_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vdiv_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vdiv_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vdiv_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vdiv_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vdiv_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vdiv_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vdiv_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vdiv_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vdiv_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vdiv_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vdiv_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vdiv_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vdiv_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vdiv_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vdiv_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vdiv_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vdiv_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vdiv_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vdiv_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vdiv_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vdiv_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vdiv_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vdiv_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return 
__riscv_vdiv_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vdiv_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vdiv_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vdiv_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vdiv_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vdiv_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vdiv_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vdiv_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vdiv_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vdiv_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vdiv_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vdiv_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vdiv_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vdiv_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vdiv_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vdiv_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vdiv_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vdiv_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vdiv_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vdiv_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vdiv_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vdiv_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vdiv_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vdiv_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vdiv_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vdiv_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vdiv_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vdiv_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vdiv_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t 
vs2, + int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vdiv_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vdiv_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vdiv_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vdiv_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vdiv_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vdiv_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vdiv_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vdiv_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vdiv_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vdiv_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vdiv_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vdiv_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vdiv_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vdiv_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vdiv_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vdiv_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vdiv_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vdiv_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vdiv_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vdiv_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vdiv_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vdiv_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vdiv_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vdiv_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vdiv_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vdiv_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vdiv_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vdiv_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vdiv_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vdiv_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vdiv_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vdiv_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vdiv_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vdiv_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vdiv_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, 
vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vdiv_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vdiv_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vdiv_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vdiv_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vdiv_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vdiv_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vdiv_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vdiv_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vdiv_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vdiv_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vdiv_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vdiv_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vdiv_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vdiv_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vdiv_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vdiv_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vdiv_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vdiv_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vdiv_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vdiv_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vdiv_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vdiv_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vdiv_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vdiv_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vdiv_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vdiv_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vdiv_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vdiv_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vdiv_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vdiv_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vdiv_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vdiv_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vdiv_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vdiv_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t 
rs1, size_t vl) { return __riscv_vdiv_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vdiv_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vdiv_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vdiv_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vdiv_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vdiv_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vdiv_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vdiv_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vdiv_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vdiv_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vdiv_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vdiv_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vdiv_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vdiv_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vdiv_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vdiv_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vdiv_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vdiv_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vdiv_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vdiv_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vdiv_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vdiv_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vdiv_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vdiv_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vdiv_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t 
test_vdiv_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vdiv_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vdiv_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vdiv_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vdiv_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vdiv_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vdiv_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vdiv_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vdiv_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vdiv_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vdiv_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vdiv_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vdiv_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vdiv_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vdiv_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vdiv_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vdiv_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vdiv_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vdiv_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vdiv_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vdiv_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vdiv_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vdiv_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vdiv_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vdiv_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vdiv_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vdiv_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vdiv_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vdiv_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vdiv_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vdiv_vx_i64m1_tumu(vbool64_t 
vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vdiv_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vdiv_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vdiv_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vdiv_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vdiv_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vdiv_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vdiv_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vdiv_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vdiv_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vdiv_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vdiv_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vdiv_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vdiv_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vdiv_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vdiv_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vdiv_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vdiv_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vdiv_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vdiv_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vdiv_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vdiv_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vdiv_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vdiv_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vdiv_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vdiv_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vdiv_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vdiv_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vdiv_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vdiv_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vdiv_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vdiv_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vdiv_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vdiv_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vdiv_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vdiv_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t 
test_vdiv_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vdiv_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vdiv_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vdiv_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vdiv_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vdiv_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vdiv_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vdiv_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vdiv_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vdiv_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vdiv_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vdiv_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vdiv_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vdiv_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vdiv_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vdiv_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vdiv_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vdiv_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vdiv_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vdiv_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vdiv_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vdiv_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vdiv_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vdiv_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vdiv_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vdiv_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vdiv_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vdiv_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vdiv_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vdiv_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vdiv_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vdiv_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vdiv_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vdiv_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vdiv_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t 
rs1, size_t vl) { +vint16m1_t test_vdiv_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vdiv_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vdiv_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vdiv_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vdiv_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vdiv_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vdiv_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vdiv_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vdiv_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vdiv_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vdiv_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vdiv_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vdiv_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vdiv_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vdiv_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vdiv_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vdiv_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vdiv_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vdiv_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vdiv_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vdiv_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vdiv_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vdiv_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vdiv_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vdiv_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vdiv_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vdiv_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vdiv_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vdiv_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vdiv_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vdiv_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vdiv_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vdiv_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, 
size_t vl) { +vint32m4_t test_vdiv_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vdiv_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vdiv_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vdiv_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vdiv_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vdiv_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vdiv_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vdiv_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vdiv_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vdiv_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vdiv_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vdiv_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vdiv_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vdiv_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vdiv_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vdiv_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vdiv_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vdiv_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vdiv_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vdiv_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vdiv_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vdiv_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vdiv_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vdiv_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vdiv_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vdiv_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vdiv_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vdiv_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vdiv_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vdiv_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vdiv_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vdiv_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vdiv_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vdiv_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vdivu.c b/auto-generated/policy_funcs/llvm-api-tests/vdivu.c index 72320a598..70b222306 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vdivu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vdivu.c @@ -5,706 +5,939 @@ #include <riscv_vector.h> -vuint8mf8_t test_vdivu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+vuint8mf8_t test_vdivu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vdivu_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vdivu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vdivu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vdivu_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vdivu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vdivu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vdivu_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vdivu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vdivu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vdivu_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vdivu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vdivu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vdivu_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vdivu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vdivu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vdivu_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vdivu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vdivu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vdivu_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vdivu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vdivu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vdivu_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vdivu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vdivu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vdivu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vdivu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vdivu_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vdivu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vdivu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vdivu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vdivu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vdivu_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vdivu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vdivu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vdivu_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vdivu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vdivu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vdivu_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vdivu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vdivu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return 
__riscv_vdivu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vdivu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vdivu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vdivu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vdivu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vdivu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vdivu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vdivu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vdivu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vdivu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vdivu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vdivu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vdivu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vdivu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vdivu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vdivu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vdivu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vdivu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vdivu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vdivu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vdivu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vdivu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vdivu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vdivu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vdivu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vdivu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vdivu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vdivu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vdivu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) 
{ return __riscv_vdivu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vdivu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vdivu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vdivu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vdivu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vdivu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vdivu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vdivu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vdivu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vdivu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vdivu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vdivu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vdivu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vdivu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vdivu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vdivu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vdivu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vdivu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vdivu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vdivu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vdivu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vdivu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vdivu_vv_u64m8_tu(vd, 
vs2, vs1, vl); } -vuint64m8_t test_vdivu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vdivu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vdivu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vdivu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vdivu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vdivu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vdivu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vdivu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vdivu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vdivu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vdivu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vdivu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vdivu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vdivu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vdivu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vdivu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vdivu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vdivu_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vdivu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vdivu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vdivu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vdivu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vdivu_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vdivu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vdivu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vdivu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vdivu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vdivu_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vdivu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vdivu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t 
vl) { return __riscv_vdivu_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vdivu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vdivu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vdivu_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vdivu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vdivu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vdivu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vdivu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vdivu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vdivu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vdivu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vdivu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vdivu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vdivu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vdivu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vdivu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vdivu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vdivu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vdivu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vdivu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vdivu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vdivu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vdivu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vdivu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vdivu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vdivu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t 
test_vdivu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vdivu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vdivu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vdivu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vdivu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vdivu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vdivu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vdivu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vdivu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vdivu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vdivu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vdivu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vdivu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vdivu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vdivu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { 
+vuint64m1_t test_vdivu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vdivu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vdivu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vdivu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vdivu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vdivu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vdivu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vdivu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vdivu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vdivu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vdivu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vdivu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vdivu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vdivu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vdivu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vdivu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vdivu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vdivu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vdivu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vdivu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vdivu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vdivu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + 
size_t vl) { return __riscv_vdivu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vdivu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vdivu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vdivu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vdivu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vdivu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vdivu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vdivu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vdivu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vdivu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vdivu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vdivu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vdivu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vdivu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vdivu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vdivu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vdivu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vdivu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vdivu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vdivu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vdivu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vdivu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vdivu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vdivu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vdivu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vdivu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vdivu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vdivu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vdivu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vdivu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vdivu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, 
vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vdivu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vdivu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vdivu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vdivu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vdivu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vdivu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vdivu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vdivu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vdivu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vdivu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vdivu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vdivu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vdivu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vdivu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vdivu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vdivu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vdivu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vdivu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vdivu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t 
test_vdivu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vdivu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vdivu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vdivu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vdivu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vdivu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vdivu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vdivu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vdivu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vdivu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vdivu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vdivu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vdivu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vdivu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vdivu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t 
rs1, size_t vl) { return __riscv_vdivu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vdivu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vdivu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vdivu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vdivu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vdivu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vdivu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vdivu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vdivu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vdivu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vdivu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vdivu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vdivu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vdivu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vdivu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vdivu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vdivu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vdivu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vdivu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vdivu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vdivu_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vdivu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vdivu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vdivu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vdivu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vdivu_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vdivu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vdivu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vdivu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t 
test_vdivu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vdivu_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vdivu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vdivu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vdivu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vdivu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vdivu_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vdivu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vdivu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vdivu_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vdivu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vdivu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vdivu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vdivu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vdivu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vdivu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vdivu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vdivu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vdivu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vdivu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vdivu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vdivu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vdivu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vdivu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vdivu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vdivu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vdivu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vdivu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vdivu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vdivu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vdivu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vdivu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vdivu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } 
-vuint16m4_t test_vdivu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vdivu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vdivu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vdivu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vdivu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vdivu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vdivu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vdivu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vdivu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vdivu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vdivu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vdivu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vdivu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vdivu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vdivu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vdivu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vdivu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vdivu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vdivu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vdivu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vdivu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vdivu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vdivu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vdivu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vdivu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vdivu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vdivu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vdivu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vdivu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vdivu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t 
test_vdivu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vdivu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vdivu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vdivu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vdivu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vdivu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vdivu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vdivu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vdivu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vdivu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vdivu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vdivu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vdivu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vdivu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vdivu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vdivu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vdivu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vdivu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vdivu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vdivu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vdivu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vdivu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vdivu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfabs.c b/auto-generated/policy_funcs/llvm-api-tests/vfabs.c index 1d7c68a47..a903088d8 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfabs.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfabs.c @@ -1,247 +1,307 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfabs_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfabs_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfabs_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfabs_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfabs_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfabs_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfabs_v_f16m1_tu(vfloat16m1_t
vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfabs_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfabs_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfabs_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfabs_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfabs_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfabs_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfabs_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfabs_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfabs_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfabs_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfabs_v_f16m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfabs_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfabs_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfabs_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfabs_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfabs_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfabs_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfabs_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfabs_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfabs_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfabs_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfabs_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfabs_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfabs_v_f32m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfabs_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfabs_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfabs_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfabs_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfabs_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfabs_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfabs_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfabs_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfabs_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfabs_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfabs_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfabs_v_f64m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfabs_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfabs_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfabs_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfabs_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfabs_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfabs_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfabs_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfabs_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfabs_v_f16m1_tum(vm, 
vd, vs2, vl); } -vfloat16m2_t test_vfabs_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfabs_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfabs_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfabs_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfabs_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfabs_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfabs_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfabs_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfabs_v_f16m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfabs_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfabs_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfabs_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfabs_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfabs_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfabs_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfabs_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfabs_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfabs_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfabs_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfabs_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfabs_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfabs_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfabs_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfabs_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfabs_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfabs_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfabs_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfabs_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfabs_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfabs_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfabs_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfabs_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfabs_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfabs_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfabs_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfabs_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfabs_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfabs_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfabs_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfabs_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfabs_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return 
__riscv_vfabs_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfabs_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfabs_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfabs_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfabs_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfabs_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfabs_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfabs_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfabs_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfabs_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfabs_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfabs_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfabs_v_f16m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfabs_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfabs_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfabs_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfabs_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfabs_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfabs_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfabs_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfabs_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfabs_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfabs_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfabs_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfabs_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfabs_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfabs_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfabs_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfabs_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfabs_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfabs_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfabs_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfabs_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfabs_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfabs_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfabs_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfabs_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfabs_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfabs_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfabs_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + 
vfloat16mf4_t vs2, size_t vl) { return __riscv_vfabs_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfabs_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfabs_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfabs_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfabs_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfabs_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfabs_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfabs_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfabs_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfabs_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfabs_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfabs_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfabs_v_f16m4_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfabs_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfabs_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfabs_v_f16m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfabs_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfabs_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfabs_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfabs_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfabs_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfabs_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfabs_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfabs_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfabs_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfabs_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfabs_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfabs_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfabs_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfabs_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfabs_v_f32m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfabs_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfabs_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfabs_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfabs_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfabs_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfabs_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfabs_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfabs_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfabs_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfabs_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfabs_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return 
__riscv_vfabs_v_f64m8_mu(vm, vd, vs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfadd.c b/auto-generated/policy_funcs/llvm-api-tests/vfadd.c
index b3f192232..fde0ae4c7 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vfadd.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vfadd.c
@@ -1,967 +1,1354 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+vfloat16mf4_t test_vfadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2,
+                                      vfloat16mf4_t vs1, size_t vl) {
   return __riscv_vfadd_vv_f16mf4_tu(vd, vs2, vs1, vl);
 }

-vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) {
+vfloat16mf4_t test_vfadd_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2,
+                                      _Float16 rs1, size_t vl) {
   return __riscv_vfadd_vf_f16mf4_tu(vd, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+vfloat16mf2_t test_vfadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2,
+                                      vfloat16mf2_t vs1, size_t vl) {
   return __riscv_vfadd_vv_f16mf2_tu(vd, vs2, vs1, vl);
 }

-vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) {
+vfloat16mf2_t test_vfadd_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2,
+                                      _Float16 rs1, size_t vl) {
   return __riscv_vfadd_vf_f16mf2_tu(vd, vs2, rs1, vl);
 }

-vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+vfloat16m1_t test_vfadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2,
+                                    vfloat16m1_t vs1, size_t vl) {
   return __riscv_vfadd_vv_f16m1_tu(vd, vs2, vs1, vl);
 }

-vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) {
+vfloat16m1_t test_vfadd_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2,
+                                    _Float16 rs1, size_t vl) {
   return __riscv_vfadd_vf_f16m1_tu(vd, vs2, rs1, vl);
 }

-vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+vfloat16m2_t test_vfadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2,
+                                    vfloat16m2_t vs1, size_t vl) {
   return __riscv_vfadd_vv_f16m2_tu(vd, vs2, vs1, vl);
 }

-vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) {
+vfloat16m2_t test_vfadd_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2,
+                                    _Float16 rs1, size_t vl) {
   return __riscv_vfadd_vf_f16m2_tu(vd, vs2, rs1, vl);
 }

-vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+vfloat16m4_t test_vfadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2,
+                                    vfloat16m4_t vs1, size_t vl) {
   return __riscv_vfadd_vv_f16m4_tu(vd, vs2, vs1, vl);
 }

-vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) {
+vfloat16m4_t test_vfadd_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2,
+                                    _Float16 rs1, size_t vl) {
   return __riscv_vfadd_vf_f16m4_tu(vd, vs2, rs1, vl);
 }

-vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) {
+vfloat16m8_t test_vfadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2,
+                                    vfloat16m8_t vs1, size_t vl) {
   return __riscv_vfadd_vv_f16m8_tu(vd, vs2, vs1,
vl); } -vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfadd_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfadd_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfadd_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfadd_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfadd_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfadd_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfadd_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfadd_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfadd_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfadd_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfadd_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfadd_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfadd_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfadd_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { 
return __riscv_vfadd_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfadd_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { return __riscv_vfadd_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfadd_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfadd_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfadd_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfadd_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfadd_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfadd_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfadd_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfadd_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfadd_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfadd_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { 
+vfloat16m2_t test_vfadd_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfadd_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfadd_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfadd_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfadd_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t 
test_vfadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfadd_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfadd_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfadd_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfadd_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfadd_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfadd_vf_f16mf4_tumu(vbool64_t vm, 
vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfadd_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfadd_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfadd_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfadd_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfadd_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfadd_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfadd_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfadd_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfadd_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfadd_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { 
+vfloat32mf2_t test_vfadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfadd_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t 
test_vfadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfadd_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfadd_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfadd_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfadd_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfadd_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfadd_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfadd_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfadd_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfadd_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfadd_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfadd_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t 
test_vfadd_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfadd_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfadd_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfadd_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfadd_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfadd_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfadd_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfadd_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, 
float rs1, size_t vl) { return __riscv_vfadd_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfadd_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfadd_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfadd_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfadd_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfadd_vv_f16mf4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfadd_vf_f16mf4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } 
-vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfadd_vv_f16mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfadd_vf_f16mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfadd_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfadd_vv_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfadd_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfadd_vf_f16m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfadd_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfadd_vv_f16m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfadd_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfadd_vf_f16m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfadd_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfadd_vv_f16m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfadd_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfadd_vf_f16m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfadd_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vfadd_vv_f16m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfadd_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfadd_vf_f16m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfadd_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfadd_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { 
+vfloat32m1_t test_vfadd_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfadd_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfadd_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfadd_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfadd_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfadd_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfadd_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfadd_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfadd_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfadd_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfadd_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { return __riscv_vfadd_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfadd_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { return __riscv_vfadd_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, 
vl); } -vfloat64m4_t test_vfadd_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfadd_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfadd_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfadd_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16mf4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16mf4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfadd_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfadd_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfadd_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfadd_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t 
vs1, size_t vl) { +vfloat16m4_t test_vfadd_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfadd_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfadd_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfadd_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t 
test_vfadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfadd_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfadd_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfadd_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfadd_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_tumu(vbool64_t vm, 
vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16mf4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfadd_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfadd_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfadd_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfadd_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfadd_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfadd_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfadd_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfadd_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t 
test_vfadd_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t 
test_vfadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfadd_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfadd_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfadd_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfadd_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfadd_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16mf4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfadd_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfadd_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16mf4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfadd_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfadd_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { 
+vfloat16mf2_t test_vfadd_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfadd_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfadd_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfadd_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfadd_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfadd_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfadd_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfadd_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfadd_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfadd_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfadd_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f16m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfadd_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfadd_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfadd_vf_f16m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfadd_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfadd_vv_f32m1_rm_mu(vbool32_t 
vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfadd_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfadd_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return 
__riscv_vfadd_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfadd_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfadd_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfadd_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfclass.c b/auto-generated/policy_funcs/llvm-api-tests/vfclass.c index 9e6eb6962..e412f9ac6 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfclass.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfclass.c @@ -1,247 +1,307 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vuint16mf4_t test_vfclass_v_u16mf4_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfclass_v_u16mf4_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfclass_v_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfclass_v_u16mf2_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfclass_v_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1_tu(vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfclass_v_u16m1_tu(vuint16m1_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfclass_v_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2_tu(vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfclass_v_u16m2_tu(vuint16m2_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfclass_v_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vfclass_v_u16m4_tu(vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfclass_v_u16m4_tu(vuint16m4_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfclass_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vfclass_v_u16m8_tu(vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfclass_v_u16m8_tu(vuint16m8_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfclass_v_u16m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfclass_v_u32mf2_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfclass_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t
test_vfclass_v_u32m1_tu(vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfclass_v_u32m1_tu(vuint32m1_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfclass_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vfclass_v_u32m2_tu(vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfclass_v_u32m2_tu(vuint32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfclass_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4_tu(vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfclass_v_u32m4_tu(vuint32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfclass_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8_tu(vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfclass_v_u32m8_tu(vuint32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfclass_v_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1_tu(vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfclass_v_u64m1_tu(vuint64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfclass_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2_tu(vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfclass_v_u64m2_tu(vuint64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfclass_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4_tu(vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfclass_v_u64m4_tu(vuint64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfclass_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8_tu(vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfclass_v_u64m8_tu(vuint64m8_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfclass_v_u64m8_tu(vd, vs2, vl); } -vuint16mf4_t test_vfclass_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfclass_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfclass_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfclass_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfclass_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfclass_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfclass_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfclass_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfclass_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vfclass_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfclass_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfclass_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vfclass_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfclass_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfclass_v_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfclass_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfclass_v_u32mf2_tum(vm, vd, vs2, vl); } 
-vuint32m1_t test_vfclass_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfclass_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfclass_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfclass_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfclass_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfclass_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfclass_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfclass_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfclass_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfclass_v_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfclass_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfclass_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfclass_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfclass_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfclass_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfclass_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfclass_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfclass_v_u64m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfclass_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfclass_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfclass_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfclass_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfclass_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfclass_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfclass_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfclass_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfclass_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vfclass_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfclass_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfclass_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vfclass_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfclass_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return 
__riscv_vfclass_v_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfclass_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfclass_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfclass_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfclass_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfclass_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfclass_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfclass_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfclass_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfclass_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfclass_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfclass_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfclass_v_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfclass_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfclass_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfclass_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfclass_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfclass_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfclass_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfclass_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfclass_v_u64m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfclass_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfclass_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfclass_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfclass_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfclass_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfclass_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vfclass_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfclass_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfclass_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vfclass_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfclass_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfclass_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vfclass_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfclass_v_u16m4_mu(vbool4_t vm, 
vuint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfclass_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vfclass_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfclass_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfclass_v_u16m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfclass_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfclass_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfclass_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfclass_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfclass_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfclass_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfclass_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfclass_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfclass_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfclass_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfclass_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfclass_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vfclass_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfclass_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfclass_v_u32m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vfclass_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfclass_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfclass_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vfclass_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfclass_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfclass_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vfclass_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfclass_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfclass_v_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vfclass_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfclass_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfclass_v_u64m8_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfcvt.c b/auto-generated/policy_funcs/llvm-api-tests/vfcvt.c index 9ad815e83..1e771a644 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfcvt.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfcvt.c @@ -1,1927 +1,2407 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint16mf4_t test_vfcvt_x_f_v_i16mf4_tu(vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vint16mf4_t test_vfcvt_x_f_v_i16mf4_tu(vint16mf4_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i16mf4_tu(vd, vs2, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_tu(vint16mf2_t vd,
vfloat16mf2_t vs2, size_t vl) { +vint16mf2_t test_vfcvt_x_f_v_i16mf2_tu(vint16mf2_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i16mf2_tu(vd, vs2, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_tu(vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vint16m1_t test_vfcvt_x_f_v_i16m1_tu(vint16m1_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i16m1_tu(vd, vs2, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_tu(vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vint16m2_t test_vfcvt_x_f_v_i16m2_tu(vint16m2_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i16m2_tu(vd, vs2, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_tu(vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vint16m4_t test_vfcvt_x_f_v_i16m4_tu(vint16m4_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i16m4_tu(vd, vs2, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_tu(vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vint16m8_t test_vfcvt_x_f_v_i16m8_tu(vint16m8_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i16m8_tu(vd, vs2, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_tu(vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfcvt_xu_f_v_u16m1_tu(vuint16m1_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_tu(vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfcvt_xu_f_v_u16m2_tu(vuint16m2_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_tu(vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfcvt_xu_f_v_u16m4_tu(vuint16m4_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_tu(vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfcvt_xu_f_v_u16m8_tu(vuint16m8_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u16m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tu(vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tu(vfloat16mf4_t vd, vint16mf4_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tu(vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tu(vfloat16mf2_t vd, vint16mf2_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_tu(vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_x_v_f16m1_tu(vfloat16m1_t vd, vint16m1_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_tu(vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_x_v_f16m2_tu(vfloat16m2_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_tu(vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_x_v_f16m4_tu(vfloat16m4_t vd, vint16m4_t vs2, + size_t vl) { 
return __riscv_vfcvt_f_x_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_tu(vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_x_v_f16m8_tu(vfloat16m8_t vd, vint16m8_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f16m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tu(vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tu(vfloat16m1_t vd, vuint16m1_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tu(vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tu(vfloat16m2_t vd, vuint16m2_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tu(vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tu(vfloat16m4_t vd, vuint16m4_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tu(vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tu(vfloat16m8_t vd, vuint16m8_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f16m8_tu(vd, vs2, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_tu(vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vint32m1_t test_vfcvt_x_f_v_i32m1_tu(vint32m1_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i32m1_tu(vd, vs2, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_tu(vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vint32m2_t test_vfcvt_x_f_v_i32m2_tu(vint32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i32m2_tu(vd, vs2, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_tu(vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vint32m4_t test_vfcvt_x_f_v_i32m4_tu(vint32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i32m4_tu(vd, vs2, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_tu(vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vint32m8_t test_vfcvt_x_f_v_i32m8_tu(vint32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i32m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfcvt_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfcvt_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_tu(vuint32m4_t vd, 
vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfcvt_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfcvt_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u32m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tu(vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tu(vfloat32mf2_t vd, vint32mf2_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_tu(vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_x_v_f32m1_tu(vfloat32m1_t vd, vint32m1_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_tu(vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_x_v_f32m2_tu(vfloat32m2_t vd, vint32m2_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_tu(vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_x_v_f32m4_tu(vfloat32m4_t vd, vint32m4_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_tu(vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_x_v_f32m8_tu(vfloat32m8_t vd, vint32m8_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f32m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tu(vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tu(vfloat32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tu(vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tu(vfloat32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tu(vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tu(vfloat32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tu(vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tu(vfloat32m8_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f32m8_tu(vd, vs2, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_tu(vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vint64m1_t test_vfcvt_x_f_v_i64m1_tu(vint64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i64m1_tu(vd, vs2, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_tu(vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vint64m2_t test_vfcvt_x_f_v_i64m2_tu(vint64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i64m2_tu(vd, vs2, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_tu(vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vint64m4_t test_vfcvt_x_f_v_i64m4_tu(vint64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i64m4_tu(vd, vs2, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_tu(vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vint64m8_t test_vfcvt_x_f_v_i64m8_tu(vint64m8_t vd, vfloat64m8_t vs2, + size_t vl) { return 
__riscv_vfcvt_x_f_v_i64m8_tu(vd, vs2, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfcvt_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfcvt_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfcvt_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfcvt_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u64m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_tu(vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_x_v_f64m1_tu(vfloat64m1_t vd, vint64m1_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_tu(vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_x_v_f64m2_tu(vfloat64m2_t vd, vint64m2_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_tu(vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_x_v_f64m4_tu(vfloat64m4_t vd, vint64m4_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_tu(vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_x_v_f64m8_tu(vfloat64m8_t vd, vint64m8_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f64m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tu(vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tu(vfloat64m1_t vd, vuint64m1_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tu(vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tu(vfloat64m2_t vd, vuint64m2_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tu(vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tu(vfloat64m4_t vd, vuint64m4_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tu(vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tu(vfloat64m8_t vd, vuint64m8_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f64m8_tu(vd, vs2, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vint16mf4_t test_vfcvt_x_f_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16mf4_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vint16mf2_t test_vfcvt_x_f_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16mf2_tum(vm, vd, vs2, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vint16m1_t test_vfcvt_x_f_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return 
__riscv_vfcvt_x_f_v_i16m1_tum(vm, vd, vs2, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vint16m2_t test_vfcvt_x_f_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m2_tum(vm, vd, vs2, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vint16m4_t test_vfcvt_x_f_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m4_tum(vm, vd, vs2, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vint16m8_t test_vfcvt_x_f_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfcvt_xu_f_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfcvt_xu_f_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfcvt_xu_f_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfcvt_xu_f_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_x_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_x_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vint16m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vint16m4_t 
vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_x_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vint16m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_x_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vint16m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vuint16m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m8_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vint32m1_t test_vfcvt_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vint32m2_t test_vfcvt_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vint32m4_t test_vfcvt_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m4_tum(vm, vd, vs2, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vint32m8_t test_vfcvt_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return 
__riscv_vfcvt_xu_f_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfcvt_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfcvt_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfcvt_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfcvt_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_x_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_x_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_x_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_x_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tum(vbool4_t vm, vfloat32m8_t 
vd, vuint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m8_tum(vm, vd, vs2, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vint64m1_t test_vfcvt_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m1_tum(vm, vd, vs2, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vint64m2_t test_vfcvt_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m2_tum(vm, vd, vs2, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vint64m4_t test_vfcvt_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m4_tum(vm, vd, vs2, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vint64m8_t test_vfcvt_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfcvt_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfcvt_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfcvt_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfcvt_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_x_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_x_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_x_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_x_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vuint64m1_t vs2, size_t vl) { return 
__riscv_vfcvt_f_xu_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m8_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vint16mf4_t test_vfcvt_x_f_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16mf4_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vint16mf2_t test_vfcvt_x_f_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16mf2_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vint16m1_t test_vfcvt_x_f_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m1_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vint16m2_t test_vfcvt_x_f_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m2_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vint16m4_t test_vfcvt_x_f_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m4_tumu(vm, vd, vs2, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vint16m8_t test_vfcvt_x_f_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfcvt_xu_f_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfcvt_xu_f_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_tumu(vbool4_t 
vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfcvt_xu_f_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfcvt_xu_f_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_x_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_x_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vint16m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_x_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vint16m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_x_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vint16m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { +vfloat16m8_t 
test_vfcvt_f_xu_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vuint16m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m8_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vint32mf2_t test_vfcvt_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vint32m1_t test_vfcvt_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vint32m2_t test_vfcvt_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vint32m4_t test_vfcvt_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m4_tumu(vm, vd, vs2, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vint32m8_t test_vfcvt_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfcvt_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfcvt_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfcvt_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfcvt_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_x_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_x_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vint32m2_t vs2, size_t vl) { return 
__riscv_vfcvt_f_x_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_x_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_x_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m8_tumu(vm, vd, vs2, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vint64m1_t test_vfcvt_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m1_tumu(vm, vd, vs2, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vint64m2_t test_vfcvt_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m2_tumu(vm, vd, vs2, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vint64m4_t test_vfcvt_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m4_tumu(vm, vd, vs2, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vint64m8_t test_vfcvt_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfcvt_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfcvt_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_tumu(vbool16_t vm, 
vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfcvt_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfcvt_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_x_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_x_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_x_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_x_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m8_tumu(vm, vd, vs2, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vint16mf4_t test_vfcvt_x_f_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16mf4_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vint16mf2_t test_vfcvt_x_f_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16mf2_mu(vm, vd, vs2, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vint16m1_t test_vfcvt_x_f_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m1_mu(vm, vd, vs2, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vint16m2_t test_vfcvt_x_f_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, + 
vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m2_mu(vm, vd, vs2, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vint16m4_t test_vfcvt_x_f_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m4_mu(vm, vd, vs2, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vint16m8_t test_vfcvt_x_f_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfcvt_xu_f_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfcvt_xu_f_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfcvt_xu_f_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfcvt_xu_f_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m8_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_x_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_x_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vint16m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_x_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vint16m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m4_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vint16m8_t 
vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_x_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vint16m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m8_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m4_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vuint16m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m8_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vint32mf2_t test_vfcvt_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vint32m1_t test_vfcvt_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vint32m2_t test_vfcvt_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m2_mu(vm, vd, vs2, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vint32m4_t test_vfcvt_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m4_mu(vm, vd, vs2, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vint32m8_t test_vfcvt_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfcvt_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m1_mu(vm, 
vd, vs2, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfcvt_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfcvt_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfcvt_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_x_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_x_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_x_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_x_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m8_mu(vm, vd, vs2, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vint64m1_t 
test_vfcvt_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m1_mu(vm, vd, vs2, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vint64m2_t test_vfcvt_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m2_mu(vm, vd, vs2, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vint64m4_t test_vfcvt_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m4_mu(vm, vd, vs2, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vint64m8_t test_vfcvt_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfcvt_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfcvt_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfcvt_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfcvt_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_x_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_x_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_x_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_x_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_mu(vbool16_t 
vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m8_mu(vm, vd, vs2, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tu(vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tu(vint16mf4_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tu(vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tu(vint16mf2_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tu(vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tu(vint16m1_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tu(vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tu(vint16m2_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tu(vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tu(vint16m4_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tu(vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tu(vint16m8_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i16m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tu(vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tu(vuint16m1_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tu(vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tu(vuint16m2_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tu(vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tu(vuint16m4_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tu(vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tu(vuint16m8_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u16m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t 
test_vfcvt_f_x_v_f16mf4_rm_tu(vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tu(vfloat16mf4_t vd, vint16mf4_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tu(vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tu(vfloat16mf2_t vd, vint16mf2_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tu(vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tu(vfloat16m1_t vd, vint16m1_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tu(vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tu(vfloat16m2_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tu(vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tu(vfloat16m4_t vd, vint16m4_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tu(vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tu(vfloat16m8_t vd, vint16m8_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f16m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tu(vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tu(vfloat16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tu(vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tu(vfloat16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tu(vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tu(vfloat16m1_t vd, vuint16m1_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tu(vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tu(vfloat16m2_t vd, vuint16m2_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tu(vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tu(vfloat16m4_t vd, vuint16m4_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tu(vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tu(vfloat16m8_t vd, vuint16m8_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f16m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tu(vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tu(vint32m1_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i32m1_rm_tu(vd, 
vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tu(vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tu(vint32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tu(vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tu(vint32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tu(vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tu(vint32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tu(vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tu(vfloat32mf2_t vd, vint32mf2_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tu(vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tu(vfloat32m1_t vd, vint32m1_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tu(vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tu(vfloat32m2_t vd, vint32m2_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tu(vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tu(vfloat32m4_t vd, vint32m4_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tu(vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tu(vfloat32m8_t vd, vint32m8_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tu(vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tu(vfloat32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { return 
__riscv_vfcvt_f_xu_v_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tu(vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tu(vfloat32m1_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tu(vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tu(vfloat32m2_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tu(vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tu(vfloat32m4_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tu(vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tu(vfloat32m8_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tu(vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tu(vint64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tu(vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tu(vint64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tu(vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tu(vint64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tu(vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tu(vint64m8_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfcvt_x_f_v_i64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfcvt_xu_f_v_u64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tu(vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tu(vfloat64m1_t vd, vint64m1_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tu(vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tu(vfloat64m2_t vd, vint64m2_t vs2, + size_t vl) { return 
__riscv_vfcvt_f_x_v_f64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tu(vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tu(vfloat64m4_t vd, vint64m4_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tu(vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tu(vfloat64m8_t vd, vint64m8_t vs2, + size_t vl) { return __riscv_vfcvt_f_x_v_f64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tu(vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tu(vfloat64m1_t vd, vuint64m1_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tu(vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tu(vfloat64m2_t vd, vuint64m2_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tu(vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tu(vfloat64m4_t vd, vuint64m4_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tu(vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tu(vfloat64m8_t vd, vuint64m8_t vs2, + size_t vl) { return __riscv_vfcvt_f_xu_v_f64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tum(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tum(vbool64_t vm, vint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tum(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tum(vbool32_t vm, vint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tum(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tum(vbool16_t vm, vint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tum(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tum(vbool8_t vm, vint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tum(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tum(vbool4_t vm, vint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tum(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tum(vbool2_t vm, vint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tum(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tum(vbool64_t vm, vuint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return 
__riscv_vfcvt_xu_f_v_u16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tum(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tum(vbool32_t vm, vuint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tum(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tum(vbool16_t vm, vuint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tum(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tum(vbool8_t vm, vuint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tum(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tum(vbool4_t vm, vuint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tum(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tum(vbool2_t vm, vuint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vint16m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vint16m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vint16m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, 
vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vuint16m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tum(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tum(vbool64_t vm, vint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tum(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tum(vbool32_t vm, vint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tum(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tum(vbool16_t vm, vint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tum(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tum(vbool8_t vm, vint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tum(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tum(vbool4_t vm, vint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tum(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tum(vbool64_t vm, vuint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tum(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tum(vbool32_t vm, vuint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tum(vbool16_t vm, vuint32m2_t vd, 
vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tum(vbool16_t vm, vuint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tum(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tum(vbool8_t vm, vuint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tum(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tum(vbool4_t vm, vuint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { +vfloat32m8_t 
test_vfcvt_f_xu_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tum(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tum(vbool64_t vm, vint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tum(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tum(vbool32_t vm, vint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tum(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tum(vbool16_t vm, vint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tum(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tum(vbool8_t vm, vint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tum(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tum(vbool64_t vm, vuint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tum(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tum(vbool32_t vm, vuint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tum(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tum(vbool16_t vm, vuint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tum(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tum(vbool8_t vm, vuint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vint64m8_t vs2, size_t vl) { return 
__riscv_vfcvt_f_x_v_f64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tumu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_tumu(vbool64_t vm, vint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tumu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_tumu(vbool32_t vm, vint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tumu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vint16m1_t test_vfcvt_x_f_v_i16m1_rm_tumu(vbool16_t vm, vint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tumu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vint16m2_t test_vfcvt_x_f_v_i16m2_rm_tumu(vbool8_t vm, vint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tumu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vint16m4_t test_vfcvt_x_f_v_i16m4_rm_tumu(vbool4_t vm, vint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tumu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vint16m8_t test_vfcvt_x_f_v_i16m8_rm_tumu(vbool2_t vm, vint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_tumu(vbool64_t vm, vuint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_tumu(vbool32_t vm, vuint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf2_rm_tumu(vm, vd, vs2, 
__RISCV_FRM_RNE, vl);
 }
-vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tumu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_tumu(vbool16_t vm, vuint16m1_t vd,
+                                            vfloat16m1_t vs2, size_t vl) {
   return __riscv_vfcvt_xu_f_v_u16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tumu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_tumu(vbool8_t vm, vuint16m2_t vd,
+                                            vfloat16m2_t vs2, size_t vl) {
   return __riscv_vfcvt_xu_f_v_u16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tumu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_tumu(vbool4_t vm, vuint16m4_t vd,
+                                            vfloat16m4_t vs2, size_t vl) {
   return __riscv_vfcvt_xu_f_v_u16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tumu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_tumu(vbool2_t vm, vuint16m8_t vd,
+                                            vfloat16m8_t vs2, size_t vl) {
   return __riscv_vfcvt_xu_f_v_u16m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) {
+vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd,
+                                              vint16mf4_t vs2, size_t vl) {
   return __riscv_vfcvt_f_x_v_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) {
+vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd,
+                                              vint16mf2_t vs2, size_t vl) {
   return __riscv_vfcvt_f_x_v_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) {
+vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd,
+                                            vint16m1_t vs2, size_t vl) {
   return __riscv_vfcvt_f_x_v_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) {
+vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd,
+                                            vint16m2_t vs2, size_t vl) {
   return __riscv_vfcvt_f_x_v_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) {
+vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd,
+                                            vint16m4_t vs2, size_t vl) {
   return __riscv_vfcvt_f_x_v_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) {
+vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd,
+                                            vint16m8_t vs2, size_t vl) {
   return __riscv_vfcvt_f_x_v_f16m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd,
+                                               vuint16mf4_t vs2, size_t vl) {
   return __riscv_vfcvt_f_xu_v_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd,
+                                               vuint16mf2_t vs2, size_t vl) {
   return __riscv_vfcvt_f_xu_v_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
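// ---------------------------------------------------------------------------
// Editorial sketch, not part of the auto-generated test suite: every _rm_*
// intrinsic above takes an explicit rounding-mode argument (these tests always
// pass __RISCV_FRM_RNE, round-to-nearest-even), and the policy suffix selects
// what happens to destination elements outside the body: _tu keeps tail
// elements of vd, _tum adds masking with a mask-agnostic policy, _tumu keeps
// both tail and masked-off elements, _mu keeps masked-off elements only. The
// wrapper function below is hypothetical; the intrinsic call and its signature
// are taken verbatim from the hunks above.
#include <riscv_vector.h>

// tumu = tail undisturbed, mask undisturbed: tail elements and elements
// masked off by vm keep the values already present in vd.
vfloat16m1_t cvt_i16_to_f16_rne_tumu(vbool16_t vm, vfloat16m1_t vd,
                                     vint16m1_t vs2, size_t vl) {
  return __riscv_vfcvt_f_x_v_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
}
// ---------------------------------------------------------------------------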
-vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vuint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vuint16m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tumu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_tumu(vbool64_t vm, vint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tumu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vint32m1_t test_vfcvt_x_f_v_i32m1_rm_tumu(vbool32_t vm, vint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tumu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vint32m2_t test_vfcvt_x_f_v_i32m2_rm_tumu(vbool16_t vm, vint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tumu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vint32m4_t test_vfcvt_x_f_v_i32m4_rm_tumu(vbool8_t vm, vint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tumu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vint32m8_t test_vfcvt_x_f_v_i32m8_rm_tumu(vbool4_t vm, vint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t vm, vuint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tumu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_tumu(vbool32_t vm, vuint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tumu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_tumu(vbool16_t vm, vuint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tumu(vbool8_t vm, 
vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_tumu(vbool8_t vm, vuint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tumu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_tumu(vbool4_t vm, vuint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tumu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, 
size_t vl) { +vint64m1_t test_vfcvt_x_f_v_i64m1_rm_tumu(vbool64_t vm, vint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tumu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vint64m2_t test_vfcvt_x_f_v_i64m2_rm_tumu(vbool32_t vm, vint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tumu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vint64m4_t test_vfcvt_x_f_v_i64m4_rm_tumu(vbool16_t vm, vint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tumu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vint64m8_t test_vfcvt_x_f_v_i64m8_rm_tumu(vbool8_t vm, vint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tumu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_tumu(vbool64_t vm, vuint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tumu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_tumu(vbool32_t vm, vuint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tumu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_tumu(vbool16_t vm, vuint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tumu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_tumu(vbool8_t vm, vuint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_tumu(vbool64_t vm, 
vfloat64m1_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_mu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vint16mf4_t test_vfcvt_x_f_v_i16mf4_rm_mu(vbool64_t vm, vint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_mu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vint16mf2_t test_vfcvt_x_f_v_i16mf2_rm_mu(vbool32_t vm, vint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfcvt_x_f_v_i16m1_rm_mu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vint16m1_t test_vfcvt_x_f_v_i16m1_rm_mu(vbool16_t vm, vint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfcvt_x_f_v_i16m2_rm_mu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vint16m2_t test_vfcvt_x_f_v_i16m2_rm_mu(vbool8_t vm, vint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfcvt_x_f_v_i16m4_rm_mu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vint16m4_t test_vfcvt_x_f_v_i16m4_rm_mu(vbool4_t vm, vint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m8_t test_vfcvt_x_f_v_i16m8_rm_mu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vint16m8_t test_vfcvt_x_f_v_i16m8_rm_mu(vbool2_t vm, vint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i16m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_mu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfcvt_xu_f_v_u16mf4_rm_mu(vbool64_t vm, vuint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_mu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfcvt_xu_f_v_u16mf2_rm_mu(vbool32_t vm, vuint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_mu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfcvt_xu_f_v_u16m1_rm_mu(vbool16_t vm, vuint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m1_rm_mu(vm, vd, 
vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_mu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfcvt_xu_f_v_u16m2_rm_mu(vbool8_t vm, vuint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_mu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfcvt_xu_f_v_u16m4_rm_mu(vbool4_t vm, vuint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_mu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfcvt_xu_f_v_u16m8_rm_mu(vbool2_t vm, vuint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u16m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_x_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_x_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_x_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_x_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vint16m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_x_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vint16m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_x_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vint16m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f16m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vuint16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vuint16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vuint16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t 
vd, vuint16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vuint16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vuint16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vuint16m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f16m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_mu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vint32mf2_t test_vfcvt_x_f_v_i32mf2_rm_mu(vbool64_t vm, vint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfcvt_x_f_v_i32m1_rm_mu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vint32m1_t test_vfcvt_x_f_v_i32m1_rm_mu(vbool32_t vm, vint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfcvt_x_f_v_i32m2_rm_mu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vint32m2_t test_vfcvt_x_f_v_i32m2_rm_mu(vbool16_t vm, vint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfcvt_x_f_v_i32m4_rm_mu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vint32m4_t test_vfcvt_x_f_v_i32m4_rm_mu(vbool8_t vm, vint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfcvt_x_f_v_i32m8_rm_mu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vint32m8_t test_vfcvt_x_f_v_i32m8_rm_mu(vbool4_t vm, vint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_mu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfcvt_xu_f_v_u32mf2_rm_mu(vbool64_t vm, vuint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_mu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfcvt_xu_f_v_u32m1_rm_mu(vbool32_t vm, vuint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_mu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfcvt_xu_f_v_u32m2_rm_mu(vbool16_t vm, vuint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_mu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfcvt_xu_f_v_u32m4_rm_mu(vbool8_t vm, vuint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_mu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfcvt_xu_f_v_u32m8_rm_mu(vbool4_t vm, vuint32m8_t vd, + vfloat32m8_t vs2, size_t 
vl) { return __riscv_vfcvt_xu_f_v_u32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_x_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_x_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_x_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_x_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_x_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vuint32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vuint32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vuint32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vuint32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfcvt_x_f_v_i64m1_rm_mu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vint64m1_t test_vfcvt_x_f_v_i64m1_rm_mu(vbool64_t vm, vint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfcvt_x_f_v_i64m2_rm_mu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vint64m2_t test_vfcvt_x_f_v_i64m2_rm_mu(vbool32_t vm, vint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t 
test_vfcvt_x_f_v_i64m4_rm_mu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vint64m4_t test_vfcvt_x_f_v_i64m4_rm_mu(vbool16_t vm, vint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfcvt_x_f_v_i64m8_rm_mu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vint64m8_t test_vfcvt_x_f_v_i64m8_rm_mu(vbool8_t vm, vint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_x_f_v_i64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_mu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfcvt_xu_f_v_u64m1_rm_mu(vbool64_t vm, vuint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_mu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfcvt_xu_f_v_u64m2_rm_mu(vbool32_t vm, vuint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_mu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfcvt_xu_f_v_u64m4_rm_mu(vbool16_t vm, vuint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_mu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfcvt_xu_f_v_u64m8_rm_mu(vbool8_t vm, vuint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_xu_f_v_u64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_x_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_x_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vint64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfcvt_f_x_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vint64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfcvt_f_x_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vfcvt_f_x_v_f64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vuint64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vuint64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vfcvt_f_xu_v_f64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vuint64m4_t vs2, size_t vl) { +vfloat64m4_t 
test_vfcvt_f_xu_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd,
+                                           vuint64m4_t vs2, size_t vl) {
   return __riscv_vfcvt_f_xu_v_f64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
-vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vuint64m8_t vs2, size_t vl) {
+vfloat64m8_t test_vfcvt_f_xu_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd,
+                                           vuint64m8_t vs2, size_t vl) {
   return __riscv_vfcvt_f_xu_v_f64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfcvt_rtz.c b/auto-generated/policy_funcs/llvm-api-tests/vfcvt_rtz.c
index a2ece9d38..76bb0ac58 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vfcvt_rtz.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vfcvt_rtz.c
@@ -1,487 +1,607 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tu(vint16mf4_t vd, vfloat16mf4_t vs2,
+                                           size_t vl) {
   return __riscv_vfcvt_rtz_x_f_v_i16mf4_tu(vd, vs2, vl);
 }
-vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tu(vint16mf2_t vd, vfloat16mf2_t vs2,
+                                           size_t vl) {
   return __riscv_vfcvt_rtz_x_f_v_i16mf2_tu(vd, vs2, vl);
 }
-vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tu(vint16m1_t vd, vfloat16m1_t vs2,
+                                         size_t vl) {
   return __riscv_vfcvt_rtz_x_f_v_i16m1_tu(vd, vs2, vl);
 }
-vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tu(vint16m2_t vd, vfloat16m2_t vs2,
+                                         size_t vl) {
   return __riscv_vfcvt_rtz_x_f_v_i16m2_tu(vd, vs2, vl);
 }
-vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tu(vint16m4_t vd, vfloat16m4_t vs2,
+                                         size_t vl) {
   return __riscv_vfcvt_rtz_x_f_v_i16m4_tu(vd, vs2, vl);
 }
-vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tu(vint16m8_t vd, vfloat16m8_t vs2,
+                                         size_t vl) {
   return __riscv_vfcvt_rtz_x_f_v_i16m8_tu(vd, vs2, vl);
 }
-vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tu(vuint16mf4_t vd, vfloat16mf4_t vs2,
+                                             size_t vl) {
   return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tu(vd, vs2, vl);
 }
-vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tu(vuint16mf2_t vd, vfloat16mf2_t vs2,
+                                             size_t vl) {
   return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tu(vd, vs2, vl);
 }
-vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tu(vuint16m1_t vd, vfloat16m1_t vs2,
+                                           size_t vl) {
   return __riscv_vfcvt_rtz_xu_f_v_u16m1_tu(vd, vs2, vl);
 }
-vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tu(vuint16m2_t vd, vfloat16m2_t vs2,
+                                           size_t vl) {
   return __riscv_vfcvt_rtz_xu_f_v_u16m2_tu(vd, vs2, vl);
 }
-vuint16m4_t 
test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tu(vuint16m4_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tu(vuint16m8_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m8_tu(vd, vs2, vl); } -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tu(vint32m1_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m1_tu(vd, vs2, vl); } -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tu(vint32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m2_tu(vd, vs2, vl); } -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tu(vint32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m4_tu(vd, vs2, vl); } -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tu(vint32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m8_tu(vd, vs2, vl); } -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tu(vint64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m1_tu(vd, vs2, vl); } -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tu(vint64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m2_tu(vd, vs2, vl); } -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tu(vint64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m4_tu(vd, 
vs2, vl); } -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tu(vint64m8_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m8_tu(vd, vs2, vl); } -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m8_tu(vd, vs2, vl); } -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16mf4_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16mf2_tum(vm, vd, vs2, vl); } -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16m1_tum(vm, vd, vs2, vl); } -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16m2_tum(vm, vd, vs2, vl); } -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16m4_tum(vm, vd, vs2, vl); } -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t 
test_vfcvt_rtz_xu_f_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m8_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m4_tum(vm, vd, vs2, vl); } -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { 
+vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m8_tum(vm, vd, vs2, vl); } -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m1_tum(vm, vd, vs2, vl); } -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m2_tum(vm, vd, vs2, vl); } -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m4_tum(vm, vd, vs2, vl); } -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m8_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16mf4_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16mf2_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16m1_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16m2_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { 
+vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16m4_tumu(vm, vd, vs2, vl); } -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m8_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m4_tumu(vm, vd, vs2, vl); } -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t vm, 
vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m8_tumu(vm, vd, vs2, vl); } -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m1_tumu(vm, vd, vs2, vl); } -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m2_tumu(vm, vd, vs2, vl); } -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m4_tumu(vm, vd, vs2, vl); } -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m8_tumu(vm, vd, vs2, vl); } -vint16mf4_t 
test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16mf4_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16mf2_mu(vm, vd, vs2, vl); } -vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vint16m1_t test_vfcvt_rtz_x_f_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16m1_mu(vm, vd, vs2, vl); } -vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vint16m2_t test_vfcvt_rtz_x_f_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16m2_mu(vm, vd, vs2, vl); } -vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vint16m4_t test_vfcvt_rtz_x_f_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16m4_mu(vm, vd, vs2, vl); } -vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vint16m8_t test_vfcvt_rtz_x_f_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i16m8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u16m8_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t 
test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vint32m1_t test_vfcvt_rtz_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vint32m2_t test_vfcvt_rtz_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m2_mu(vm, vd, vs2, vl); } -vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vint32m4_t test_vfcvt_rtz_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m4_mu(vm, vd, vs2, vl); } -vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vint32m8_t test_vfcvt_rtz_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i32m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u32m8_mu(vm, vd, vs2, vl); } -vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vint64m1_t test_vfcvt_rtz_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m1_mu(vm, vd, vs2, vl); } -vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vint64m2_t test_vfcvt_rtz_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m2_mu(vm, vd, vs2, vl); } -vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vint64m4_t test_vfcvt_rtz_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m4_mu(vm, vd, vs2, vl); } -vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vint64m8_t test_vfcvt_rtz_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_x_f_v_i64m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, 
vfloat64m1_t vs2, size_t vl) { +vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfcvt_rtz_xu_f_v_u64m8_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfdiv.c b/auto-generated/policy_funcs/llvm-api-tests/vfdiv.c index 46c1039dd..288623743 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfdiv.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfdiv.c @@ -1,967 +1,1354 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfdiv_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfdiv_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfdiv_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfdiv_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfdiv_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfdiv_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfdiv_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfdiv_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_tu(vfloat16m2_t vd,
vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfdiv_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfdiv_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfdiv_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfdiv_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfdiv_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vfdiv_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfdiv_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfdiv_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfdiv_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfdiv_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfdiv_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfdiv_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfdiv_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfdiv_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfdiv_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfdiv_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfdiv_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfdiv_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfdiv_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfdiv_vv_f32m8_tu(vd, vs2, vs1, vl); } 
-vfloat32m8_t test_vfdiv_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfdiv_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfdiv_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfdiv_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfdiv_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfdiv_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { return __riscv_vfdiv_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfdiv_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfdiv_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { return __riscv_vfdiv_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfdiv_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfdiv_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfdiv_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfdiv_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t 
test_vfdiv_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfdiv_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfdiv_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfdiv_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfdiv_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfdiv_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfdiv_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfdiv_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfdiv_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfdiv_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfdiv_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t 
test_vfdiv_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfdiv_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfdiv_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfdiv_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfdiv_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfdiv_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfdiv_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfdiv_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfdiv_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfdiv_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfdiv_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfdiv_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfdiv_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, 
vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfdiv_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfdiv_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfdiv_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfdiv_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfdiv_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfdiv_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfdiv_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfdiv_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t 
test_vfdiv_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfdiv_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfdiv_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfdiv_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfdiv_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfdiv_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfdiv_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfdiv_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfdiv_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfdiv_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfdiv_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t 
test_vfdiv_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfdiv_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfdiv_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfdiv_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfdiv_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfdiv_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfdiv_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfdiv_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfdiv_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } 
-vfloat16m1_t test_vfdiv_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfdiv_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfdiv_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfdiv_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfdiv_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfdiv_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfdiv_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfdiv_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfdiv_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfdiv_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfdiv_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t 
vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfdiv_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfdiv_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfdiv_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfdiv_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfdiv_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfdiv_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfdiv_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfdiv_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfdiv_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfdiv_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfdiv_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfdiv_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfdiv_vv_f64m8_mu(vbool8_t 
vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfdiv_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfdiv_vv_f16mf4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16mf4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfdiv_vv_f16mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfdiv_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfdiv_vv_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfdiv_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfdiv_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfdiv_vv_f16m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfdiv_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfdiv_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfdiv_vv_f16m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfdiv_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfdiv_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vfdiv_vv_f16m8_rm_tu(vd, vs2, 
vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfdiv_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfdiv_vf_f16m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfdiv_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfdiv_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfdiv_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfdiv_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfdiv_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfdiv_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfdiv_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfdiv_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfdiv_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfdiv_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfdiv_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfdiv_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfdiv_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfdiv_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfdiv_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfdiv_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t 
test_vfdiv_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfdiv_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { return __riscv_vfdiv_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfdiv_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfdiv_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { return __riscv_vfdiv_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfdiv_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfdiv_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfdiv_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfdiv_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfdiv_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16mf4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16mf4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfdiv_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_rm_tum(vbool16_t 
vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfdiv_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfdiv_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfdiv_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfdiv_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfdiv_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfdiv_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfdiv_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfdiv_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfdiv_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, 
vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfdiv_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfdiv_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfdiv_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfdiv_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfdiv_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfdiv_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfdiv_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfdiv_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfdiv_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfdiv_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfdiv_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfdiv_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfdiv_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { 
+vfloat64m4_t test_vfdiv_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfdiv_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfdiv_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfdiv_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfdiv_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16mf4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfdiv_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfdiv_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfdiv_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfdiv_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, 
vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfdiv_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfdiv_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfdiv_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfdiv_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfdiv_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfdiv_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfdiv_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfdiv_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfdiv_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t 
vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfdiv_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfdiv_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfdiv_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfdiv_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfdiv_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfdiv_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfdiv_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfdiv_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfdiv_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfdiv_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfdiv_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfdiv_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfdiv_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfdiv_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfdiv_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, 
size_t vl) { +vfloat16mf4_t test_vfdiv_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16mf4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfdiv_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16mf4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfdiv_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfdiv_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfdiv_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfdiv_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfdiv_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfdiv_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfdiv_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfdiv_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfdiv_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfdiv_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfdiv_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f16m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfdiv_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t 
test_vfdiv_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfdiv_vf_f16m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfdiv_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfdiv_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfdiv_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfdiv_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfdiv_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfdiv_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfdiv_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfdiv_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfdiv_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfdiv_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfdiv_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfdiv_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfdiv_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfdiv_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfdiv_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfdiv_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, 
vfloat64m1_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfdiv_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfdiv_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfdiv_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfdiv_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfdiv_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfdiv_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfdiv_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfdiv_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfdiv_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfdiv_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfdiv_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfdiv_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfdiv_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfdiv_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfdiv_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfdiv_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfmacc.c b/auto-generated/policy_funcs/llvm-api-tests/vfmacc.c index 98c597f81..c81d55623 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfmacc.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfmacc.c @@ -1,967 +1,1367 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16mf4_tu(vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16mf4_tu(vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_tu(vfloat16mf2_t vd,
vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16mf2_tu(vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16mf2_tu(vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16m1_tu(vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16m1_tu(vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16m2_tu(vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16m2_tu(vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16m4_tu(vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16m4_tu(vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16m8_tu(vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16m8_tu(vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32mf2_tu(vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t 
vs2, size_t vl) { return __riscv_vfmacc_vf_f32m1_tu(vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m2_tu(vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m4_tu(vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m8_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m8_tu(vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m1_tu(vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m2_tu(vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m4_tu(vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m8_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t 
test_vfmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m8_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16mf4_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16mf4_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m1_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m1_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m2_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m4_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m4_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m8_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, 
size_t vl) { +vfloat16m8_t test_vfmacc_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m8_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m1_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m4_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m8_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t 
vl) { +vfloat64m1_t test_vfmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_tumu(vbool16_t vm, 
vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } 
-vfloat32m2_t test_vfmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m8_tumu(vm, vd, vs1, 
vs2, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16mf4_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m8_mu(vm, 
vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m8_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t 
test_vfmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m1_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m2_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m4_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m8_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16mf4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16mf4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_rm_tu(vfloat16m1_t 
vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f16m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vf_f32m2_rm_tu(vfloat32m2_t 
vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t 
test_vfmacc_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m8_rm_tum(vm, vd, vs1, 
vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32m8_rm_tum(vm, vd, rs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfmacc_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t 
vs2, size_t vl) { - return __riscv_vfmacc_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfmacc_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); 
+vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmacc_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfmacc_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } 
-vfloat64m1_t test_vfmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmacc_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmacc_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16mf2_rm_mu(vm, vd, 
rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmacc_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmacc_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmacc_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmacc_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmacc_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmacc_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmacc_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmacc_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m1_rm_mu(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmacc_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t 
test_vfmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmacc_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmacc_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfmadd.c b/auto-generated/policy_funcs/llvm-api-tests/vfmadd.c index 695b9f505..fbd2569a4 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfmadd.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfmadd.c @@ -1,967 +1,1367 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16mf4_tu(vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16mf4_tu(vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16mf2_tu(vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16mf2_tu(vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m1_tu(vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16m1_tu(vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_tu(vfloat16m2_t vd,
vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m2_tu(vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16m2_tu(vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m4_tu(vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16m4_tu(vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m8_tu(vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16m8_tu(vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32mf2_tu(vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m1_tu(vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m2_tu(vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return 
__riscv_vfmadd_vf_f32m4_tu(vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32m8_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m8_tu(vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmadd_vv_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m1_tu(vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmadd_vv_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m2_tu(vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmadd_vv_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m4_tu(vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f64m8_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m8_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16mf4_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16mf4_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16mf2_tum(vm, vd, vs1, 
vs2, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m1_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m1_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m2_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m4_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m4_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m8_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m8_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return 
__riscv_vfmadd_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m1_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m4_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m8_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return 
__riscv_vfmadd_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vv_f16m4_tumu(vbool4_t vm, 
vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t 
test_vfmadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16mf4_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, 
vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m8_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, 
vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m8_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m1_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m2_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t 
test_vfmadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m4_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m8_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16mf4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16mf4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + 
vfloat16m4_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f16m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } 
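The _rm_tu tests above all pass __RISCV_FRM_RNE, but the rounding-mode operand is an ordinary argument: each call can select its own static rounding mode rather than inheriting the dynamic frm CSR. A minimal sketch of a caller, not part of the generated tests — the helper name fmadd_round_up and the element count n are illustrative assumptions:

#include <riscv_vector.h>

// vfmadd computes vd[i] = vd[i] * vs1[i] + vs2[i]. The _rm variant takes a
// static rounding mode immediately before vl (here __RISCV_FRM_RUP, round
// toward +infinity), and the _tu suffix keeps tail elements of vd past vl
// undisturbed.
vfloat32m1_t fmadd_round_up(vfloat32m1_t vd, vfloat32m1_t vs1,
                            vfloat32m1_t vs2, size_t n) {
  size_t vl = __riscv_vsetvl_e32m1(n); // elements handled in this strip
  return __riscv_vfmadd_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RUP, vl);
}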
-vfloat32m8_t test_vfmadd_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmadd_vv_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmadd_vv_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmadd_vv_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return 
__riscv_vfmadd_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return 
__riscv_vfmadd_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfmadd_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfmadd_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vv_f16m1_rm_tumu(vbool16_t vm, 
vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmadd_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfmadd_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return 
__riscv_vfmadd_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return 
__riscv_vfmadd_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmadd_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmadd_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmadd_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmadd_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, 
vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmadd_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmadd_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmadd_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmadd_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmadd_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmadd_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { 
return __riscv_vfmadd_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmadd_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmadd_vv_f64m8_rm_mu(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmadd_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfmax.c b/auto-generated/policy_funcs/llvm-api-tests/vfmax.c index 050592bfb..b952d0a2e 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfmax.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfmax.c @@ -1,487 +1,668 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfmax_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmax_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfmax_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmax_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmax_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmax_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfmax_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmax_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmax_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmax_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfmax_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmax_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmax_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmax_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfmax_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmax_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmax_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmax_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfmax_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmax_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return
__riscv_vfmax_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmax_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vfmax_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmax_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmax_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmax_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfmax_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmax_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfmax_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmax_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfmax_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmax_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfmax_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmax_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfmax_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmax_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfmax_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmax_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfmax_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmax_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfmax_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmax_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfmax_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmax_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfmax_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmax_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfmax_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmax_vf_f64m1_tu(vfloat64m1_t vd, 
vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfmax_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmax_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { return __riscv_vfmax_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmax_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfmax_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmax_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { return __riscv_vfmax_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmax_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfmax_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmax_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfmax_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmax_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfmax_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmax_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmax_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmax_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmax_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmax_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmax_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmax_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, 
size_t vl) { +vfloat16m2_t test_vfmax_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmax_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmax_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmax_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmax_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmax_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmax_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmax_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfmax_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmax_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmax_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmax_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmax_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t 
test_vfmax_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmax_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmax_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmax_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmax_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfmax_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmax_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfmax_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmax_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmax_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfmax_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmax_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfmax_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmax_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfmax_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmax_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfmax_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmax_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfmax_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmax_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmax_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, 
+ vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmax_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmax_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmax_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmax_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmax_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmax_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmax_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmax_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmax_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmax_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmax_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t 
test_vfmax_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmax_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfmax_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmax_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmax_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmax_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmax_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmax_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmax_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmax_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmax_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmax_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfmax_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmax_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfmax_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t 
test_vfmax_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmax_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfmax_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmax_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfmax_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmax_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfmax_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmax_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfmax_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmax_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfmax_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmax_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmax_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmax_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmax_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmax_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmax_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmax_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmax_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmax_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmax_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmax_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmax_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmax_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfmax_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmax_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t 
test_vfmax_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmax_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmax_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfmax_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmax_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmax_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmax_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmax_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfmax_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmax_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmax_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfmax_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmax_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmax_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfmax_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmax_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmax_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmax_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmax_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmax_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmax_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmax_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmax_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmax_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmax_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmax_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmax_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmax_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmax_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, 
vfloat32m4_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmax_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmax_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmax_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmax_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfmax_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmax_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmax_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfmax_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmax_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmax_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfmax_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmax_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmax_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfmax_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmax_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmax_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfmax_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmax_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmax_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfmax_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmax_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmax_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfmax_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmax_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmax_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfmax_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmax_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmax_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfmax_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmax_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmax_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfmax_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfmerge.c b/auto-generated/policy_funcs/llvm-api-tests/vfmerge.c index c2340518c..00af5a6c3 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfmerge.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfmerge.c @@ -1,67 +1,84 @@ // REQUIRES: 
riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, vbool64_t v0, size_t vl) { +vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, vbool64_t v0, + size_t vl) { return __riscv_vfmerge_vfm_f16mf4_tu(vd, vs2, rs1, v0, vl); } -vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, vbool32_t v0, size_t vl) { +vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, vbool32_t v0, + size_t vl) { return __riscv_vfmerge_vfm_f16mf2_tu(vd, vs2, rs1, v0, vl); } -vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, vbool16_t v0, size_t vl) { +vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, vbool16_t v0, size_t vl) { return __riscv_vfmerge_vfm_f16m1_tu(vd, vs2, rs1, v0, vl); } -vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, vbool8_t v0, size_t vl) { +vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, vbool8_t v0, size_t vl) { return __riscv_vfmerge_vfm_f16m2_tu(vd, vs2, rs1, v0, vl); } -vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, vbool4_t v0, size_t vl) { +vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, vbool4_t v0, size_t vl) { return __riscv_vfmerge_vfm_f16m4_tu(vd, vs2, rs1, v0, vl); } -vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, vbool2_t v0, size_t vl) { +vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, vbool2_t v0, size_t vl) { return __riscv_vfmerge_vfm_f16m8_tu(vd, vs2, rs1, v0, vl); } -vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, vbool64_t v0, size_t vl) { +vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, vbool64_t v0, size_t vl) { return __riscv_vfmerge_vfm_f32mf2_tu(vd, vs2, rs1, v0, vl); } -vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, vbool32_t v0, size_t vl) { +vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, vbool32_t v0, size_t vl) { return __riscv_vfmerge_vfm_f32m1_tu(vd, vs2, rs1, v0, vl); } -vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, vbool16_t v0, size_t vl) { +vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, vbool16_t v0, size_t vl) { return __riscv_vfmerge_vfm_f32m2_tu(vd, vs2, rs1, v0, vl); } -vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, vbool8_t v0, size_t vl) { +vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, vbool8_t v0, size_t vl) { return __riscv_vfmerge_vfm_f32m4_tu(vd, vs2, rs1, v0, vl); } -vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, vbool4_t v0, size_t vl) { +vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, vbool4_t v0, size_t vl) { return __riscv_vfmerge_vfm_f32m8_tu(vd, vs2,
rs1, v0, vl); } -vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, vbool64_t v0, size_t vl) { +vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, vbool64_t v0, size_t vl) { return __riscv_vfmerge_vfm_f64m1_tu(vd, vs2, rs1, v0, vl); } -vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, vbool32_t v0, size_t vl) { +vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, vbool32_t v0, size_t vl) { return __riscv_vfmerge_vfm_f64m2_tu(vd, vs2, rs1, v0, vl); } -vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, vbool16_t v0, size_t vl) { +vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, vbool16_t v0, size_t vl) { return __riscv_vfmerge_vfm_f64m4_tu(vd, vs2, rs1, v0, vl); } -vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, vbool8_t v0, size_t vl) { +vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, vbool8_t v0, size_t vl) { return __riscv_vfmerge_vfm_f64m8_tu(vd, vs2, rs1, v0, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfmin.c b/auto-generated/policy_funcs/llvm-api-tests/vfmin.c index ca65682f4..ab473680b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfmin.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfmin.c @@ -1,487 +1,668 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfmin_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmin_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfmin_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmin_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmin_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmin_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfmin_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmin_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmin_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmin_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfmin_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmin_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmin_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_tu(vfloat16m2_t vd,
vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfmin_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmin_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmin_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmin_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfmin_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmin_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmin_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmin_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vfmin_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmin_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmin_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmin_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfmin_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmin_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfmin_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmin_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfmin_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmin_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfmin_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmin_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfmin_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmin_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfmin_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmin_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfmin_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmin_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfmin_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { 
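// Reminder: the _tu (tail-undisturbed) suffix on these policy intrinsics
// means elements past vl keep the values of the destination operand vd
// rather than being left tail-agnostic, which is why every _tu test threads
// vd through as the first argument.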
+vfloat32m8_t test_vfmin_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfmin_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmin_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfmin_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmin_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfmin_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmin_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfmin_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmin_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { return __riscv_vfmin_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmin_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfmin_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmin_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { return __riscv_vfmin_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmin_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfmin_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmin_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfmin_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmin_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfmin_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmin_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmin_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmin_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmin_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmin_vf_f16mf2_tum(vbool32_t vm, 
vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmin_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmin_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmin_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmin_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmin_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmin_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmin_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmin_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmin_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmin_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfmin_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmin_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmin_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t 
vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfmin_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmin_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmin_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfmin_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmin_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmin_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfmin_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmin_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmin_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfmin_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmin_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfmin_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmin_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfmin_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmin_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmin_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfmin_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmin_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfmin_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmin_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return 
__riscv_vfmin_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmin_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfmin_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmin_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfmin_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmin_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmin_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmin_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmin_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmin_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmin_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmin_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmin_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmin_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmin_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmin_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, 
_Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmin_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmin_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmin_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmin_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfmin_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmin_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmin_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfmin_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmin_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmin_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfmin_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmin_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmin_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfmin_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmin_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmin_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, 
float rs1, size_t vl) { return __riscv_vfmin_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmin_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfmin_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmin_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfmin_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmin_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmin_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfmin_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmin_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfmin_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmin_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfmin_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmin_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfmin_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmin_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmin_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfmin_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmin_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmin_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmin_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmin_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmin_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmin_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmin_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmin_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t 
vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmin_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmin_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmin_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmin_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmin_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfmin_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmin_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmin_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmin_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmin_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfmin_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmin_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmin_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmin_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmin_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfmin_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmin_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmin_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfmin_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmin_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmin_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfmin_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmin_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmin_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmin_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmin_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vfmin_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmin_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmin_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmin_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmin_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return 
__riscv_vfmin_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmin_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmin_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmin_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmin_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfmin_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmin_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmin_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmin_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmin_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfmin_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmin_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmin_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfmin_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmin_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmin_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfmin_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmin_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmin_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfmin_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmin_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmin_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfmin_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmin_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmin_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfmin_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmin_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmin_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfmin_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmin_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmin_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfmin_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmin_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmin_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfmin_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmin_vv_f64m8_mu(vbool8_t vm, 
vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) {
+vfloat64m8_t test_vfmin_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd,
+                                    vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                    size_t vl) {
   return __riscv_vfmin_vv_f64m8_mu(vm, vd, vs2, vs1, vl);
 }

-vfloat64m8_t test_vfmin_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) {
+vfloat64m8_t test_vfmin_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd,
+                                    vfloat64m8_t vs2, double rs1, size_t vl) {
   return __riscv_vfmin_vf_f64m8_mu(vm, vd, vs2, rs1, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfmsac.c b/auto-generated/policy_funcs/llvm-api-tests/vfmsac.c
index 72f117c72..5eef22162 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vfmsac.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vfmsac.c
@@ -1,967 +1,1367 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat16mf4_t test_vfmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+vfloat16mf4_t test_vfmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1,
+                                       vfloat16mf4_t vs2, size_t vl) {
   return __riscv_vfmsac_vv_f16mf4_tu(vd, vs1, vs2, vl);
 }

-vfloat16mf4_t test_vfmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
+vfloat16mf4_t test_vfmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1,
+                                       vfloat16mf4_t vs2, size_t vl) {
   return __riscv_vfmsac_vf_f16mf4_tu(vd, rs1, vs2, vl);
 }

-vfloat16mf2_t test_vfmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+vfloat16mf2_t test_vfmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1,
+                                       vfloat16mf2_t vs2, size_t vl) {
   return __riscv_vfmsac_vv_f16mf2_tu(vd, vs1, vs2, vl);
 }

-vfloat16mf2_t test_vfmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
+vfloat16mf2_t test_vfmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1,
+                                       vfloat16mf2_t vs2, size_t vl) {
   return __riscv_vfmsac_vf_f16mf2_tu(vd, rs1, vs2, vl);
 }

-vfloat16m1_t test_vfmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
+vfloat16m1_t test_vfmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1,
+                                     vfloat16m1_t vs2, size_t vl) {
   return __riscv_vfmsac_vv_f16m1_tu(vd, vs1, vs2, vl);
 }

-vfloat16m1_t test_vfmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
+vfloat16m1_t test_vfmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1,
+                                     vfloat16m1_t vs2, size_t vl) {
   return __riscv_vfmsac_vf_f16m1_tu(vd, rs1, vs2, vl);
 }

-vfloat16m2_t test_vfmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
+vfloat16m2_t test_vfmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1,
+                                     vfloat16m2_t vs2, size_t vl) {
   return __riscv_vfmsac_vv_f16m2_tu(vd, vs1, vs2, vl);
 }

-vfloat16m2_t test_vfmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
+vfloat16m2_t test_vfmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1,
+                                     vfloat16m2_t vs2, size_t vl) {
   return __riscv_vfmsac_vf_f16m2_tu(vd, rs1, vs2, vl);
 }

-vfloat16m4_t test_vfmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
+vfloat16m4_t test_vfmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1,
+                                     vfloat16m4_t vs2, size_t vl) {
   return __riscv_vfmsac_vv_f16m4_tu(vd, vs1, vs2, vl);
 }
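[Note] The "_tu" functions above exercise one of the four policy suffixes these auto-generated tests walk through for every element type and LMUL: "_tu" (unmasked, tail undisturbed), "_tum" (masked, tail undisturbed), "_tumu" (masked, tail and mask undisturbed), and "_mu" (masked, mask undisturbed). A minimal caller sketch follows; the wrapper name fmsac_prefix and the e32/m1 configuration are illustrative choices, not part of the generated tests:

#include <riscv_vector.h>

// Illustrative sketch (not from this diff): computes
// vd[i] = vs1[i] * vs2[i] - vd[i] for i < vl; elements at index >= vl keep
// their previous value in vd because of the tail-undisturbed (_tu) policy.
vfloat32m1_t fmsac_prefix(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2,
                          size_t n) {
  size_t vl = __riscv_vsetvl_e32m1(n); // elements processed this strip
  return __riscv_vfmsac_vv_f32m1_tu(vd, vs1, vs2, vl);
}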
-vfloat16m4_t test_vfmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f16m4_tu(vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_tu(vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f16m8_tu(vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32mf2_tu(vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m1_tu(vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m2_tu(vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m4_tu(vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m8_tu(vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t 
vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m1_tu(vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m2_tu(vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m4_tu(vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m8_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16mf4_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16mf4_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m1_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { 
+vfloat16m1_t test_vfmsac_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m1_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m2_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m4_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m4_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m8_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m8_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m1_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, 
size_t vl) { +vfloat32m2_t test_vfmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m4_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m8_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t 
test_vfmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_tumu(vbool2_t 
vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t 
test_vfmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16mf4_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return 
__riscv_vfmsac_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m8_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return 
__riscv_vfmsac_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m8_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m1_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m2_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m4_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t 
test_vfmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m8_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16mf4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f16mf4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f16mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmsac_vf_f16m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f16m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f16m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t 
vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f16m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { 
return __riscv_vfmsac_vf_f64m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, 
size_t vl) { +vfloat16m1_t test_vfmsac_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t 
vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { 
+vfloat64m4_t test_vfmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfmsac_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfmsac_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t 
test_vfmsac_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsac_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfmsac_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { 
+vfloat32m2_t test_vfmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t 
vl) { +vfloat64m8_t test_vfmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsac_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsac_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsac_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsac_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsac_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsac_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsac_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t 
vs2, size_t vl) { +vfloat16m4_t test_vfmsac_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsac_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsac_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t 
test_vfmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsac_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsac_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsac_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfmsub.c b/auto-generated/policy_funcs/llvm-api-tests/vfmsub.c index b41d363ba..97202c3a7 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfmsub.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfmsub.c @@ -1,967 +1,1367 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh 
-disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16mf4_tu(vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16mf4_tu(vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16mf2_tu(vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16mf2_tu(vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m1_tu(vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16m1_tu(vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m2_tu(vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16m2_tu(vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m4_tu(vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16m4_tu(vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_tu(vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16m8_tu(vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return
__riscv_vfmsub_vv_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32mf2_tu(vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m1_tu(vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m2_tu(vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m4_tu(vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m8_tu(vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m1_tu(vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m2_tu(vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t 
test_vfmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m4_tu(vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m8_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16mf4_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16mf4_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m1_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m1_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m2_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, 
vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m4_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m4_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m8_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m8_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m1_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m4_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, 
vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m8_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, 
+ vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { 
+vfloat32m1_t test_vfmsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, 
size_t vl) { +vfloat64m4_t test_vfmsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16mf4_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, 
vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m8_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, 
size_t vl) { +vfloat32m8_t test_vfmsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m8_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m1_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m2_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m4_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m8_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16mf4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16mf4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t 
test_vfmsub_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f16m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, 
size_t vl) { return __riscv_vfmsub_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_rm_tu(vfloat64m4_t 
vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { 
+vfloat16m4_t test_vfmsub_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { 
+vfloat32m4_t test_vfmsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return 
__riscv_vfmsub_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfmsub_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfmsub_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t 
test_vfmsub_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfmsub_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfmsub_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t 
test_vfmsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfmsub_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t 
vl) { +vfloat16mf4_t test_vfmsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfmsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfmsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfmsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfmsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfmsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { 
+vfloat32mf2_t test_vfmsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfmsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfmsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfmsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfmsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfmsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfmsub_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfmsub_vf_f64m1_rm_mu(vbool64_t 
vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfmsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfmsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsub_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfmsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfmsub_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfmul.c b/auto-generated/policy_funcs/llvm-api-tests/vfmul.c index ab044a8c1..9dbac1615 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfmul.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfmul.c @@ -1,967 +1,1354 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfmul_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmul_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfmul_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmul_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmul_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfmul_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_tu(vfloat16mf2_t vd,
vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmul_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmul_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfmul_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmul_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmul_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfmul_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmul_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmul_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfmul_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmul_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmul_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vfmul_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmul_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmul_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfmul_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmul_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfmul_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmul_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfmul_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmul_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfmul_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmul_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfmul_vv_f32m2_tu(vd, 
vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmul_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfmul_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmul_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfmul_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmul_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfmul_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmul_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfmul_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmul_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfmul_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmul_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfmul_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmul_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfmul_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmul_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { return __riscv_vfmul_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmul_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfmul_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmul_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { return __riscv_vfmul_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmul_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfmul_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmul_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfmul_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmul_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfmul_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmul_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, 
vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmul_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmul_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmul_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmul_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmul_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmul_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmul_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmul_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmul_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmul_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmul_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmul_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + 
vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmul_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmul_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmul_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmul_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmul_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmul_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmul_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmul_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmul_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmul_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmul_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfmul_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmul_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { 
return __riscv_vfmul_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmul_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfmul_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmul_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmul_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfmul_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmul_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmul_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfmul_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmul_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmul_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmul_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmul_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmul_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmul_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmul_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, 
vfloat16m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmul_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmul_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmul_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmul_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmul_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmul_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmul_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmul_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmul_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmul_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmul_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmul_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + 
vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmul_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmul_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmul_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmul_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmul_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfmul_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmul_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmul_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfmul_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmul_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmul_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfmul_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmul_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmul_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfmul_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmul_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, 
vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmul_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmul_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmul_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmul_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmul_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmul_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmul_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmul_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmul_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmul_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmul_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmul_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t 
vl) { return __riscv_vfmul_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmul_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmul_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmul_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmul_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmul_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmul_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmul_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmul_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmul_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmul_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmul_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfmul_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmul_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t 
test_vfmul_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmul_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfmul_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmul_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmul_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfmul_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmul_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmul_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfmul_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfmul_vv_f16mf4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16mf4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfmul_vv_f16mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmul_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfmul_vv_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmul_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmul_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfmul_vv_f16m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { 
+vfloat16m2_t test_vfmul_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmul_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfmul_vv_f16m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmul_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmul_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vfmul_vv_f16m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmul_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfmul_vf_f16m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfmul_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfmul_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmul_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfmul_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmul_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfmul_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmul_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfmul_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmul_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfmul_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmul_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfmul_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmul_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfmul_vf_f32m4_rm_tu(vd, vs2, 
rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmul_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfmul_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmul_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfmul_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmul_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfmul_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmul_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfmul_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmul_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { return __riscv_vfmul_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmul_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfmul_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmul_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { return __riscv_vfmul_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmul_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfmul_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmul_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfmul_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmul_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfmul_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16mf4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16mf4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t 
test_vfmul_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmul_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmul_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmul_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmul_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmul_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmul_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmul_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmul_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t 
test_vfmul_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmul_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmul_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmul_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmul_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmul_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmul_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmul_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmul_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmul_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmul_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfmul_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, 
vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmul_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmul_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfmul_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmul_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmul_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfmul_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmul_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmul_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfmul_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmul_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16mf4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmul_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmul_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmul_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmul_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_rm_tumu(vbool16_t 
vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmul_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmul_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmul_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmul_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmul_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmul_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmul_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmul_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmul_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmul_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmul_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t 
test_vfmul_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmul_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmul_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmul_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmul_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmul_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmul_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmul_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmul_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfmul_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmul_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmul_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfmul_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmul_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t 
test_vfmul_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmul_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfmul_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmul_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmul_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfmul_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfmul_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16mf4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfmul_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmul_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16mf4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfmul_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfmul_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmul_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfmul_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfmul_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfmul_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfmul_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfmul_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfmul_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t 
test_vfmul_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfmul_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfmul_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfmul_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfmul_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f16m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfmul_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfmul_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfmul_vf_f16m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfmul_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfmul_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfmul_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfmul_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfmul_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfmul_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfmul_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfmul_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfmul_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfmul_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfmul_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfmul_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, 
vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfmul_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfmul_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfmul_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfmul_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfmul_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfmul_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfmul_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfmul_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfmul_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfmul_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfmul_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfmul_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfmul_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfmul_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfmul_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfmul_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfmul_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfmul_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfmul_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfmul_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfmul_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfmul_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfmv.c b/auto-generated/policy_funcs/llvm-api-tests/vfmv.c index dedd6a4f5..e5d156a50 100644 --- 
a/auto-generated/policy_funcs/llvm-api-tests/vfmv.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfmv.c @@ -1,16 +1,18 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfmv_v_f_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmv_v_f_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, + size_t vl) { return __riscv_vfmv_v_f_f16mf4_tu(vd, rs1, vl); } -vfloat16mf2_t test_vfmv_v_f_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmv_v_f_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, + size_t vl) { return __riscv_vfmv_v_f_f16mf2_tu(vd, rs1, vl); } @@ -66,11 +68,13 @@ vfloat64m8_t test_vfmv_v_f_f64m8_tu(vfloat64m8_t vd, double rs1, size_t vl) { return __riscv_vfmv_v_f_f64m8_tu(vd, rs1, vl); } -vfloat16mf4_t test_vfmv_s_f_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfmv_s_f_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, + size_t vl) { return __riscv_vfmv_s_f_f16mf4_tu(vd, rs1, vl); } -vfloat16mf2_t test_vfmv_s_f_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfmv_s_f_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, + size_t vl) { return __riscv_vfmv_s_f_f16mf2_tu(vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfncvt.c b/auto-generated/policy_funcs/llvm-api-tests/vfncvt.c index 95f962411..551f7cae3 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfncvt.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfncvt.c @@ -1,20 +1,23 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8_t test_vfncvt_x_f_w_i8mf8_tu(vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vint8mf8_t test_vfncvt_x_f_w_i8mf8_tu(vint8mf8_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i8mf8_tu(vd, vs2, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_tu(vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vint8mf4_t test_vfncvt_x_f_w_i8mf4_tu(vint8mf4_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i8mf4_tu(vd, vs2, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_tu(vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vint8mf2_t test_vfncvt_x_f_w_i8mf2_tu(vint8mf2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i8mf2_tu(vd, vs2, vl); } @@ -30,1802 +33,2252 @@ vint8m4_t test_vfncvt_x_f_w_i8m4_tu(vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m4_tu(vd, vs2, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tu(vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tu(vuint8mf8_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf8_tu(vd, vs2, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tu(vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tu(vuint8mf4_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf4_tu(vd, vs2, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tu(vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint8mf2_t
test_vfncvt_xu_f_w_u8mf2_tu(vuint8mf2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf2_tu(vd, vs2, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_tu(vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vuint8m1_t test_vfncvt_xu_f_w_u8m1_tu(vuint8m1_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u8m1_tu(vd, vs2, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_tu(vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vuint8m2_t test_vfncvt_xu_f_w_u8m2_tu(vuint8m2_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u8m2_tu(vd, vs2, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_tu(vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vuint8m4_t test_vfncvt_xu_f_w_u8m4_tu(vuint8m4_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u8m4_tu(vd, vs2, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_tu(vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tu(vint16mf4_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i16mf4_tu(vd, vs2, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_tu(vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vint16mf2_t test_vfncvt_x_f_w_i16mf2_tu(vint16mf2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i16mf2_tu(vd, vs2, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_tu(vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vint16m1_t test_vfncvt_x_f_w_i16m1_tu(vint16m1_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i16m1_tu(vd, vs2, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_tu(vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vint16m2_t test_vfncvt_x_f_w_i16m2_tu(vint16m2_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i16m2_tu(vd, vs2, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_tu(vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vint16m4_t test_vfncvt_x_f_w_i16m4_tu(vint16m4_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i16m4_tu(vd, vs2, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tu(vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tu(vuint16mf4_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tu(vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tu(vuint16mf2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_tu(vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vuint16m1_t test_vfncvt_xu_f_w_u16m1_tu(vuint16m1_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_tu(vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vuint16m2_t test_vfncvt_xu_f_w_u16m2_tu(vuint16m2_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_tu(vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vuint16m4_t test_vfncvt_xu_f_w_u16m4_tu(vuint16m4_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u16m4_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tu(vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tu(vfloat16mf4_t vd, vint32mf2_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tu(vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tu(vfloat16mf2_t vd, vint32m1_t vs2, + size_t vl) { return 
__riscv_vfncvt_f_x_w_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_tu(vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_x_w_f16m1_tu(vfloat16m1_t vd, vint32m2_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_tu(vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_x_w_f16m2_tu(vfloat16m2_t vd, vint32m4_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_tu(vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_x_w_f16m4_tu(vfloat16m4_t vd, vint32m8_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f16m4_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tu(vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tu(vfloat16mf4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tu(vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tu(vfloat16mf2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tu(vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tu(vfloat16m1_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tu(vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tu(vfloat16m2_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tu(vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tu(vfloat16m4_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f16m4_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tu(vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tu(vfloat16mf4_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tu(vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tu(vfloat16mf2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_tu(vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_f_w_f16m1_tu(vfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_tu(vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_f_w_f16m2_tu(vfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_tu(vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_f_w_f16m4_tu(vfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f16m4_tu(vd, vs2, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_tu(vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vint32mf2_t test_vfncvt_x_f_w_i32mf2_tu(vint32mf2_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_tu(vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vint32m1_t test_vfncvt_x_f_w_i32m1_tu(vint32m1_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i32m1_tu(vd, vs2, 
vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_tu(vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vint32m2_t test_vfncvt_x_f_w_i32m2_tu(vint32m2_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i32m2_tu(vd, vs2, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_tu(vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vint32m4_t test_vfncvt_x_f_w_i32m4_tu(vint32m4_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i32m4_tu(vd, vs2, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tu(vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tu(vuint32mf2_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_tu(vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vuint32m1_t test_vfncvt_xu_f_w_u32m1_tu(vuint32m1_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_tu(vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vuint32m2_t test_vfncvt_xu_f_w_u32m2_tu(vuint32m2_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_tu(vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vuint32m4_t test_vfncvt_xu_f_w_u32m4_tu(vuint32m4_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u32m4_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tu(vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tu(vfloat32mf2_t vd, vint64m1_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_tu(vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_x_w_f32m1_tu(vfloat32m1_t vd, vint64m2_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_tu(vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_x_w_f32m2_tu(vfloat32m2_t vd, vint64m4_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_tu(vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_x_w_f32m4_tu(vfloat32m4_t vd, vint64m8_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f32m4_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tu(vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tu(vfloat32mf2_t vd, vuint64m1_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tu(vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tu(vfloat32m1_t vd, vuint64m2_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tu(vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tu(vfloat32m2_t vd, vuint64m4_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tu(vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tu(vfloat32m4_t vd, vuint64m8_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f32m4_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tu(vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tu(vfloat32mf2_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_tu(vfloat32m1_t 
vd, vfloat64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_f_w_f32m1_tu(vfloat32m1_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_tu(vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_f_w_f32m2_tu(vfloat32m2_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_tu(vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_f_w_f32m4_tu(vfloat32m4_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f32m4_tu(vd, vs2, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vint8mf8_t test_vfncvt_x_f_w_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf8_tum(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vint8mf4_t test_vfncvt_x_f_w_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf4_tum(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vint8mf2_t test_vfncvt_x_f_w_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf2_tum(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_tum(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vint8m1_t test_vfncvt_x_f_w_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m1_tum(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_tum(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vint8m2_t test_vfncvt_x_f_w_i8m2_tum(vbool4_t vm, vint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m2_tum(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_tum(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vint8m4_t test_vfncvt_x_f_w_i8m4_tum(vbool2_t vm, vint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m4_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vuint8m1_t test_vfncvt_xu_f_w_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vuint8m2_t test_vfncvt_xu_f_w_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_tum(vbool2_t 
vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vuint8m4_t test_vfncvt_xu_f_w_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m4_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16mf4_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vint16mf2_t test_vfncvt_x_f_w_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16mf2_tum(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_tum(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vint16m1_t test_vfncvt_x_f_w_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m1_tum(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_tum(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vint16m2_t test_vfncvt_x_f_w_i16m2_tum(vbool8_t vm, vint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m2_tum(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_tum(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vint16m4_t test_vfncvt_x_f_w_i16m4_tum(vbool4_t vm, vint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m4_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vuint16m1_t test_vfncvt_xu_f_w_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vuint16m2_t test_vfncvt_xu_f_w_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vuint16m4_t test_vfncvt_xu_f_w_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m4_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { +vfloat16m1_t 
test_vfncvt_f_x_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_x_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_x_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m4_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m4_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_f_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_f_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_f_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m4_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vint32mf2_t test_vfncvt_x_f_w_i32mf2_tum(vbool64_t vm, vint32mf2_t 
vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vint32m1_t test_vfncvt_x_f_w_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vint32m2_t test_vfncvt_x_f_w_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vint32m4_t test_vfncvt_x_f_w_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m4_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vuint32m1_t test_vfncvt_xu_f_w_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vuint32m2_t test_vfncvt_xu_f_w_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vuint32m4_t test_vfncvt_xu_f_w_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m4_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_x_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_x_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_x_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m4_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m1_tum(vm, vd, vs2, vl); } 
-vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m4_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_f_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_f_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_f_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m4_tum(vm, vd, vs2, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vint8mf8_t test_vfncvt_x_f_w_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf8_tumu(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vint8mf4_t test_vfncvt_x_f_w_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf4_tumu(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vint8mf2_t test_vfncvt_x_f_w_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf2_tumu(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vint8m1_t test_vfncvt_x_f_w_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m1_tumu(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vint8m2_t test_vfncvt_x_f_w_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m2_tumu(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vint8m4_t test_vfncvt_x_f_w_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m4_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { 
+vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vuint8m1_t test_vfncvt_xu_f_w_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vuint8m2_t test_vfncvt_xu_f_w_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vuint8m4_t test_vfncvt_xu_f_w_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m4_tumu(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vint16mf4_t test_vfncvt_x_f_w_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16mf4_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vint16mf2_t test_vfncvt_x_f_w_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16mf2_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vint16m1_t test_vfncvt_x_f_w_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m1_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vint16m2_t test_vfncvt_x_f_w_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m2_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vint16m4_t test_vfncvt_x_f_w_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m4_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vuint16m1_t test_vfncvt_xu_f_w_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vuint16m2_t test_vfncvt_xu_f_w_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + 
vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vuint16m4_t test_vfncvt_xu_f_w_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m4_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_x_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_x_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_x_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + 
vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_f_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_f_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_f_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m4_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vint32mf2_t test_vfncvt_x_f_w_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vint32m1_t test_vfncvt_x_f_w_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vint32m2_t test_vfncvt_x_f_w_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vint32m4_t test_vfncvt_x_f_w_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m4_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vuint32m1_t test_vfncvt_xu_f_w_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vuint32m2_t test_vfncvt_xu_f_w_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vuint32m4_t test_vfncvt_xu_f_w_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m4_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_x_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vint64m2_t vs2, size_t vl) { return 
__riscv_vfncvt_f_x_w_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_x_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_x_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_f_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_f_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_f_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m4_tumu(vm, vd, vs2, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vint8mf8_t test_vfncvt_x_f_w_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf8_mu(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vint8mf4_t test_vfncvt_x_f_w_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf4_mu(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vint8mf2_t test_vfncvt_x_f_w_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf2_mu(vm, vd, vs2, vl); } -vint8m1_t 
test_vfncvt_x_f_w_i8m1_mu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vint8m1_t test_vfncvt_x_f_w_i8m1_mu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i8m1_mu(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_mu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vint8m2_t test_vfncvt_x_f_w_i8m2_mu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i8m2_mu(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_mu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vint8m4_t test_vfncvt_x_f_w_i8m4_mu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i8m4_mu(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vuint8m1_t test_vfncvt_xu_f_w_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vuint8m2_t test_vfncvt_xu_f_w_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vuint8m4_t test_vfncvt_xu_f_w_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m4_mu(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vint16mf4_t test_vfncvt_x_f_w_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16mf4_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vint16mf2_t test_vfncvt_x_f_w_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16mf2_mu(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_mu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vint16m1_t test_vfncvt_x_f_w_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m1_mu(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_mu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vint16m2_t test_vfncvt_x_f_w_i16m2_mu(vbool8_t vm, vint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m2_mu(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_mu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vint16m4_t test_vfncvt_x_f_w_i16m4_mu(vbool4_t vm, vint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return 
__riscv_vfncvt_x_f_w_i16m4_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vuint16m1_t test_vfncvt_xu_f_w_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vuint16m2_t test_vfncvt_xu_f_w_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vuint16m4_t test_vfncvt_xu_f_w_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m4_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_x_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_x_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_x_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m4_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t 
test_vfncvt_f_xu_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m4_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_f_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_f_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_f_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m4_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vint32mf2_t test_vfncvt_x_f_w_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vint32m1_t test_vfncvt_x_f_w_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vint32m2_t test_vfncvt_x_f_w_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m2_mu(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vint32m4_t test_vfncvt_x_f_w_i32m4_mu(vbool8_t vm, vint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m4_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vuint32m1_t test_vfncvt_xu_f_w_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vuint32m2_t 
test_vfncvt_xu_f_w_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vuint32m4_t test_vfncvt_xu_f_w_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m4_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_x_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_x_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_x_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m4_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m4_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_f_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_f_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_f_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return 
__riscv_vfncvt_f_f_w_f32m4_mu(vm, vd, vs2, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tu(vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tu(vint8mf8_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tu(vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tu(vint8mf4_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tu(vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tu(vint8mf2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tu(vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tu(vint8m1_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tu(vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tu(vint8m2_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tu(vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tu(vint8m4_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tu(vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tu(vuint8mf8_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tu(vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tu(vuint8mf4_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tu(vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tu(vuint8mf2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tu(vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tu(vuint8m1_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tu(vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tu(vuint8m2_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tu(vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tu(vuint8m4_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tu(vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tu(vint16mf4_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tu(vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tu(vint16mf2_t vd, vfloat32m1_t vs2, + size_t vl) { return 
__riscv_vfncvt_x_f_w_i16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tu(vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tu(vint16m1_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tu(vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tu(vint16m2_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tu(vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tu(vint16m4_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tu(vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tu(vuint16mf4_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tu(vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tu(vuint16mf2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tu(vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tu(vuint16m1_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tu(vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tu(vuint16m2_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tu(vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tu(vuint16m4_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tu(vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tu(vfloat16mf4_t vd, vint32mf2_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tu(vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tu(vfloat16mf2_t vd, vint32m1_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tu(vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tu(vfloat16m1_t vd, vint32m2_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tu(vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tu(vfloat16m2_t vd, vint32m4_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tu(vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tu(vfloat16m4_t vd, vint32m8_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tu(vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat16mf4_t 
test_vfncvt_f_xu_w_f16mf4_rm_tu(vfloat16mf4_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tu(vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tu(vfloat16mf2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tu(vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tu(vfloat16m1_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tu(vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tu(vfloat16m2_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tu(vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tu(vfloat16m4_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tu(vfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tu(vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tu(vfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tu(vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tu(vfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tu(vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tu(vfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tu(vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tu(vint32mf2_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tu(vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tu(vint32m1_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tu(vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tu(vint32m2_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tu(vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tu(vint32m4_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfncvt_x_f_w_i32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t 
test_vfncvt_xu_f_w_u32mf2_rm_tu(vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tu(vuint32mf2_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tu(vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tu(vuint32m1_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tu(vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tu(vuint32m2_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tu(vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tu(vuint32m4_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfncvt_xu_f_w_u32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tu(vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tu(vfloat32mf2_t vd, vint64m1_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tu(vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tu(vfloat32m1_t vd, vint64m2_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tu(vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tu(vfloat32m2_t vd, vint64m4_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tu(vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tu(vfloat32m4_t vd, vint64m8_t vs2, + size_t vl) { return __riscv_vfncvt_f_x_w_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tu(vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tu(vfloat32mf2_t vd, vuint64m1_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tu(vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tu(vfloat32m1_t vd, vuint64m2_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tu(vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tu(vfloat32m2_t vd, vuint64m4_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tu(vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tu(vfloat32m4_t vd, vuint64m8_t vs2, + size_t vl) { return __riscv_vfncvt_f_xu_w_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tu(vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tu(vfloat32m1_t vd, vfloat64m2_t vs2, + size_t vl) { 
return __riscv_vfncvt_f_f_w_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tu(vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tu(vfloat32m2_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tu(vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tu(vfloat32m4_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfncvt_f_f_w_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tum(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tum(vbool64_t vm, vuint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tum(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tum(vbool32_t vm, vuint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { 
+vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tum(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tum(vbool64_t vm, vint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tum(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tum(vbool32_t vm, vint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tum(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tum(vbool16_t vm, vint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tum(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tum(vbool8_t vm, vint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tum(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tum(vbool4_t vm, vint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tum(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tum(vbool64_t vm, vuint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tum(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tum(vbool32_t vm, vuint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tum(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tum(vbool16_t vm, vuint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tum(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tum(vbool8_t vm, vuint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tum(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tum(vbool4_t vm, vuint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tum(vbool64_t vm, 
vfloat16mf4_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tum(vbool16_t vm, 
vfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tum(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tum(vbool64_t vm, vint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tum(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tum(vbool32_t vm, vint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tum(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tum(vbool16_t vm, vint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tum(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tum(vbool8_t vm, vint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tum(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tum(vbool64_t vm, vuint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tum(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tum(vbool32_t vm, vuint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tum(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tum(vbool16_t vm, vuint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tum(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tum(vbool8_t vm, vuint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vint64m2_t vs2, size_t vl) { return 
__riscv_vfncvt_f_x_w_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return 
__riscv_vfncvt_x_f_w_i8mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vint8m1_t test_vfncvt_x_f_w_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vint8m2_t test_vfncvt_x_f_w_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vint8m4_t test_vfncvt_x_f_w_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tumu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_tumu(vbool64_t vm, vuint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tumu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_tumu(vbool32_t vm, vuint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tumu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_tumu(vbool16_t vm, vuint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tumu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_tumu(vbool64_t vm, vint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tumu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_tumu(vbool32_t vm, vint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } 
-vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tumu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vint16m1_t test_vfncvt_x_f_w_i16m1_rm_tumu(vbool16_t vm, vint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tumu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vint16m2_t test_vfncvt_x_f_w_i16m2_rm_tumu(vbool8_t vm, vint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tumu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vint16m4_t test_vfncvt_x_f_w_i16m4_rm_tumu(vbool4_t vm, vint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_tumu(vbool64_t vm, vuint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_tumu(vbool32_t vm, vuint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tumu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_tumu(vbool16_t vm, vuint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tumu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_tumu(vbool8_t vm, vuint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tumu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_tumu(vbool4_t vm, vuint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t 
test_vfncvt_f_x_w_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tumu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_tumu(vbool64_t vm, vint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32mf2_rm_tumu(vm, vd, vs2, 
__RISCV_FRM_RNE, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tumu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vint32m1_t test_vfncvt_x_f_w_i32m1_rm_tumu(vbool32_t vm, vint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tumu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vint32m2_t test_vfncvt_x_f_w_i32m2_rm_tumu(vbool16_t vm, vint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tumu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vint32m4_t test_vfncvt_x_f_w_i32m4_rm_tumu(vbool8_t vm, vint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_tumu(vbool64_t vm, vuint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tumu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_tumu(vbool32_t vm, vuint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tumu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_tumu(vbool16_t vm, vuint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tumu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_tumu(vbool8_t vm, vuint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, 
vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vint8mf8_t test_vfncvt_x_f_w_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vint8mf4_t test_vfncvt_x_f_w_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vint8mf2_t test_vfncvt_x_f_w_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m1_t test_vfncvt_x_f_w_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vint8m1_t test_vfncvt_x_f_w_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m2_t test_vfncvt_x_f_w_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vint8m2_t test_vfncvt_x_f_w_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint8m4_t test_vfncvt_x_f_w_i8m4_rm_mu(vbool2_t vm, 
vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vint8m4_t test_vfncvt_x_f_w_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vfncvt_xu_f_w_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vfncvt_xu_f_w_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint8mf2_t test_vfncvt_xu_f_w_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vuint8m1_t test_vfncvt_xu_f_w_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vuint8m2_t test_vfncvt_xu_f_w_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vuint8m4_t test_vfncvt_xu_f_w_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_mu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vint16mf4_t test_vfncvt_x_f_w_i16mf4_rm_mu(vbool64_t vm, vint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_mu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vint16mf2_t test_vfncvt_x_f_w_i16mf2_rm_mu(vbool32_t vm, vint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m1_t test_vfncvt_x_f_w_i16m1_rm_mu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vint16m1_t test_vfncvt_x_f_w_i16m1_rm_mu(vbool16_t vm, vint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m2_t test_vfncvt_x_f_w_i16m2_rm_mu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vint16m2_t test_vfncvt_x_f_w_i16m2_rm_mu(vbool8_t vm, vint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint16m4_t test_vfncvt_x_f_w_i16m4_rm_mu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vint16m4_t test_vfncvt_x_f_w_i16m4_rm_mu(vbool4_t vm, vint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_mu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vfncvt_xu_f_w_u16mf4_rm_mu(vbool64_t vm, vuint16mf4_t vd, + 
vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_mu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint16mf2_t test_vfncvt_xu_f_w_u16mf2_rm_mu(vbool32_t vm, vuint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_mu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vuint16m1_t test_vfncvt_xu_f_w_u16m1_rm_mu(vbool16_t vm, vuint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_mu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vuint16m2_t test_vfncvt_xu_f_w_u16m2_rm_mu(vbool8_t vm, vuint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_mu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vuint16m4_t test_vfncvt_xu_f_w_u16m4_rm_mu(vbool4_t vm, vuint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_x_w_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_x_w_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_x_w_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_x_w_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_x_w_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vuint32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vuint32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vuint32m2_t vs2, size_t vl) { return 
__riscv_vfncvt_f_xu_w_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vuint32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vuint32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_f_f_w_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_f_f_w_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_f_f_w_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_f_f_w_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_f_f_w_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_mu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vint32mf2_t test_vfncvt_x_f_w_i32mf2_rm_mu(vbool64_t vm, vint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfncvt_x_f_w_i32m1_rm_mu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vint32m1_t test_vfncvt_x_f_w_i32m1_rm_mu(vbool32_t vm, vint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfncvt_x_f_w_i32m2_rm_mu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vint32m2_t test_vfncvt_x_f_w_i32m2_rm_mu(vbool16_t vm, vint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfncvt_x_f_w_i32m4_rm_mu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vint32m4_t test_vfncvt_x_f_w_i32m4_rm_mu(vbool8_t vm, vint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_x_f_w_i32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_mu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vuint32mf2_t test_vfncvt_xu_f_w_u32mf2_rm_mu(vbool64_t vm, vuint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } 
-vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_mu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vuint32m1_t test_vfncvt_xu_f_w_u32m1_rm_mu(vbool32_t vm, vuint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_mu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vuint32m2_t test_vfncvt_xu_f_w_u32m2_rm_mu(vbool16_t vm, vuint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_mu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vuint32m4_t test_vfncvt_xu_f_w_u32m4_rm_mu(vbool8_t vm, vuint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_xu_f_w_u32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_x_w_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_x_w_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_x_w_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_x_w_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_x_w_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vuint64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vuint64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vuint64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vuint64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_xu_w_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_f_f_w_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_mu(vbool32_t vm, 
vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_f_f_w_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_f_f_w_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_f_f_w_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_f_f_w_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfncvt_rod.c b/auto-generated/policy_funcs/llvm-api-tests/vfncvt_rod.c index 2425f766c..868f29c52 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfncvt_rod.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfncvt_rod.c @@ -1,151 +1,187 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16m4_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat32m4_t
test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32m4_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16m4_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32m4_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t 
test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16m4_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32m4_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f16m4_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + 
vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_rod_f_f_w_f32m4_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfncvt_rtz.c b/auto-generated/policy_funcs/llvm-api-tests/vfncvt_rtz.c index 1b2ab7538..64d84100c 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfncvt_rtz.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfncvt_rtz.c @@ -1,487 +1,607 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tu(vint8mf8_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8mf8_tu(vd, vs2, vl); } -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tu(vint8mf4_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8mf4_tu(vd, vs2, vl); } -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tu(vint8mf2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8mf2_tu(vd, vs2, vl); } -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tu(vint8m1_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8m1_tu(vd, vs2, vl); } -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tu(vint8m2_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8m2_tu(vd, vs2, vl); } -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tu(vint8m4_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8m4_tu(vd, vs2, vl); } -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tu(vuint8mf8_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tu(vd, vs2, vl); } -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tu(vuint8mf4_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tu(vd, vs2, vl); } -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tu(vuint8mf2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tu(vd, vs2, vl); } -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vuint8m1_t
test_vfncvt_rtz_xu_f_w_u8m1_tu(vuint8m1_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8m1_tu(vd, vs2, vl); } -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tu(vuint8m2_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8m2_tu(vd, vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tu(vuint8m4_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8m4_tu(vd, vs2, vl); } -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tu(vint16mf4_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16mf4_tu(vd, vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tu(vint16mf2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16mf2_tu(vd, vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tu(vint16m1_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16m1_tu(vd, vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tu(vint16m2_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16m2_tu(vd, vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tu(vint16m4_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16m4_tu(vd, vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tu(vuint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tu(vuint16mf2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tu(vuint16m1_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tu(vuint16m2_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tu(vuint16m4_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16m4_tu(vd, vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tu(vint32mf2_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tu(vint32m1_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32m1_tu(vd, vs2, vl); } -vint32m2_t 
test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tu(vint32m2_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32m2_tu(vd, vs2, vl); } -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tu(vint32m4_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32m4_tu(vd, vs2, vl); } -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tu(vuint32mf2_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tu(vuint32m1_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tu(vuint32m2_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tu(vuint32m4_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32m4_tu(vd, vs2, vl); } -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8mf8_tum(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8mf4_tum(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8mf2_tum(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8m1_tum(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tum(vbool4_t vm, vint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8m2_tum(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tum(vbool2_t vm, vint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8m4_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { 
return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8m4_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16mf4_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16mf2_tum(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16m1_tum(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tum(vbool8_t vm, vint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16m2_tum(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tum(vbool4_t vm, vint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16m4_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tum(vbool8_t vm, vuint16m2_t 
vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16m4_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32m4_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32m4_tum(vm, vd, vs2, vl); } -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8mf8_tumu(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8mf4_tumu(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8mf2_tumu(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vint8m1_t 
test_vfncvt_rtz_x_f_w_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8m1_tumu(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8m2_tumu(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8m4_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8m4_tumu(vm, vd, vs2, vl); } -vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16mf4_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16mf2_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16m1_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16m2_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t 
vs2, size_t vl) { +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16m4_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16m4_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32m4_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32m2_tumu(vm, vd, 
vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32m4_tumu(vm, vd, vs2, vl); } -vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8mf8_mu(vm, vd, vs2, vl); } -vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8mf4_mu(vm, vd, vs2, vl); } -vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8mf2_mu(vm, vd, vs2, vl); } -vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t vm, vint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vint8m1_t test_vfncvt_rtz_x_f_w_i8m1_mu(vbool8_t vm, vint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8m1_mu(vm, vd, vs2, vl); } -vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t vm, vint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vint8m2_t test_vfncvt_rtz_x_f_w_i8m2_mu(vbool4_t vm, vint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8m2_mu(vm, vd, vs2, vl); } -vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t vm, vint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vint8m4_t test_vfncvt_rtz_x_f_w_i8m4_mu(vbool2_t vm, vint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i8m4_mu(vm, vd, vs2, vl); } -vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vfloat16m2_t vs2, size_t vl) { +vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vfloat16m4_t vs2, size_t vl) { +vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vfloat16m8_t vs2, size_t vl) { +vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u8m4_mu(vm, vd, vs2, vl); } -vint16mf4_t 
test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16mf4_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16mf2_mu(vm, vd, vs2, vl); } -vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t vm, vint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vint16m1_t test_vfncvt_rtz_x_f_w_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16m1_mu(vm, vd, vs2, vl); } -vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t vm, vint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vint16m2_t test_vfncvt_rtz_x_f_w_i16m2_mu(vbool8_t vm, vint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16m2_mu(vm, vd, vs2, vl); } -vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t vm, vint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vint16m4_t test_vfncvt_rtz_x_f_w_i16m4_mu(vbool4_t vm, vint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i16m4_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vfloat32m2_t vs2, size_t vl) { +vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vfloat32m4_t vs2, size_t vl) { +vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vfloat32m8_t vs2, size_t vl) { +vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u16m4_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vint32m1_t test_vfncvt_rtz_x_f_w_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vint32m2_t test_vfncvt_rtz_x_f_w_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32m2_mu(vm, vd, vs2, vl); } 
-vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vint32m4_t test_vfncvt_rtz_x_f_w_i32m4_mu(vbool8_t vm, vint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_x_f_w_i32m4_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat64m1_t vs2, size_t vl) { +vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat64m2_t vs2, size_t vl) { +vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat64m4_t vs2, size_t vl) { +vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat64m8_t vs2, size_t vl) { +vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfncvt_rtz_xu_f_w_u32m4_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfneg.c b/auto-generated/policy_funcs/llvm-api-tests/vfneg.c index e31e9145e..e4d203d35 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfneg.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfneg.c @@ -1,247 +1,307 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfneg_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs, size_t vl) { +vfloat16mf4_t test_vfneg_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs, + size_t vl) { return __riscv_vfneg_v_f16mf4_tu(vd, vs, vl); } -vfloat16mf2_t test_vfneg_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs, size_t vl) { +vfloat16mf2_t test_vfneg_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs, + size_t vl) { return __riscv_vfneg_v_f16mf2_tu(vd, vs, vl); } -vfloat16m1_t test_vfneg_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs, size_t vl) { +vfloat16m1_t test_vfneg_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs, + size_t vl) { return __riscv_vfneg_v_f16m1_tu(vd, vs, vl); } -vfloat16m2_t test_vfneg_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs, size_t vl) { +vfloat16m2_t test_vfneg_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs, + size_t vl) { return __riscv_vfneg_v_f16m2_tu(vd, vs, vl); } -vfloat16m4_t test_vfneg_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs, size_t vl) { +vfloat16m4_t test_vfneg_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs, + size_t vl) { return __riscv_vfneg_v_f16m4_tu(vd, vs, vl); } -vfloat16m8_t test_vfneg_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs, size_t vl) { +vfloat16m8_t test_vfneg_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs, + size_t vl) { return __riscv_vfneg_v_f16m8_tu(vd, vs, vl); } -vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs, size_t vl) { +vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs, + size_t vl) { return __riscv_vfneg_v_f32mf2_tu(vd, vs, vl); } -vfloat32m1_t
test_vfneg_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs, size_t vl) { +vfloat32m1_t test_vfneg_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs, + size_t vl) { return __riscv_vfneg_v_f32m1_tu(vd, vs, vl); } -vfloat32m2_t test_vfneg_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs, size_t vl) { +vfloat32m2_t test_vfneg_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs, + size_t vl) { return __riscv_vfneg_v_f32m2_tu(vd, vs, vl); } -vfloat32m4_t test_vfneg_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs, size_t vl) { +vfloat32m4_t test_vfneg_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs, + size_t vl) { return __riscv_vfneg_v_f32m4_tu(vd, vs, vl); } -vfloat32m8_t test_vfneg_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs, size_t vl) { +vfloat32m8_t test_vfneg_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs, + size_t vl) { return __riscv_vfneg_v_f32m8_tu(vd, vs, vl); } -vfloat64m1_t test_vfneg_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs, size_t vl) { +vfloat64m1_t test_vfneg_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs, + size_t vl) { return __riscv_vfneg_v_f64m1_tu(vd, vs, vl); } -vfloat64m2_t test_vfneg_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs, size_t vl) { +vfloat64m2_t test_vfneg_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs, + size_t vl) { return __riscv_vfneg_v_f64m2_tu(vd, vs, vl); } -vfloat64m4_t test_vfneg_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs, size_t vl) { +vfloat64m4_t test_vfneg_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs, + size_t vl) { return __riscv_vfneg_v_f64m4_tu(vd, vs, vl); } -vfloat64m8_t test_vfneg_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs, size_t vl) { +vfloat64m8_t test_vfneg_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs, + size_t vl) { return __riscv_vfneg_v_f64m8_tu(vd, vs, vl); } -vfloat16mf4_t test_vfneg_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs, size_t vl) { +vfloat16mf4_t test_vfneg_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs, size_t vl) { return __riscv_vfneg_v_f16mf4_tum(vm, vd, vs, vl); } -vfloat16mf2_t test_vfneg_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs, size_t vl) { +vfloat16mf2_t test_vfneg_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs, size_t vl) { return __riscv_vfneg_v_f16mf2_tum(vm, vd, vs, vl); } -vfloat16m1_t test_vfneg_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs, size_t vl) { +vfloat16m1_t test_vfneg_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs, size_t vl) { return __riscv_vfneg_v_f16m1_tum(vm, vd, vs, vl); } -vfloat16m2_t test_vfneg_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs, size_t vl) { +vfloat16m2_t test_vfneg_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs, size_t vl) { return __riscv_vfneg_v_f16m2_tum(vm, vd, vs, vl); } -vfloat16m4_t test_vfneg_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs, size_t vl) { +vfloat16m4_t test_vfneg_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs, size_t vl) { return __riscv_vfneg_v_f16m4_tum(vm, vd, vs, vl); } -vfloat16m8_t test_vfneg_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs, size_t vl) { +vfloat16m8_t test_vfneg_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs, size_t vl) { return __riscv_vfneg_v_f16m8_tum(vm, vd, vs, vl); } -vfloat32mf2_t test_vfneg_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs, size_t vl) { +vfloat32mf2_t test_vfneg_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs, size_t vl) { return __riscv_vfneg_v_f32mf2_tum(vm, vd, vs, vl); } -vfloat32m1_t test_vfneg_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, 
vfloat32m1_t vs, size_t vl) { +vfloat32m1_t test_vfneg_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs, size_t vl) { return __riscv_vfneg_v_f32m1_tum(vm, vd, vs, vl); } -vfloat32m2_t test_vfneg_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs, size_t vl) { +vfloat32m2_t test_vfneg_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs, size_t vl) { return __riscv_vfneg_v_f32m2_tum(vm, vd, vs, vl); } -vfloat32m4_t test_vfneg_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs, size_t vl) { +vfloat32m4_t test_vfneg_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs, size_t vl) { return __riscv_vfneg_v_f32m4_tum(vm, vd, vs, vl); } -vfloat32m8_t test_vfneg_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs, size_t vl) { +vfloat32m8_t test_vfneg_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs, size_t vl) { return __riscv_vfneg_v_f32m8_tum(vm, vd, vs, vl); } -vfloat64m1_t test_vfneg_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs, size_t vl) { +vfloat64m1_t test_vfneg_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs, size_t vl) { return __riscv_vfneg_v_f64m1_tum(vm, vd, vs, vl); } -vfloat64m2_t test_vfneg_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs, size_t vl) { +vfloat64m2_t test_vfneg_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs, size_t vl) { return __riscv_vfneg_v_f64m2_tum(vm, vd, vs, vl); } -vfloat64m4_t test_vfneg_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs, size_t vl) { +vfloat64m4_t test_vfneg_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs, size_t vl) { return __riscv_vfneg_v_f64m4_tum(vm, vd, vs, vl); } -vfloat64m8_t test_vfneg_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs, size_t vl) { +vfloat64m8_t test_vfneg_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs, size_t vl) { return __riscv_vfneg_v_f64m8_tum(vm, vd, vs, vl); } -vfloat16mf4_t test_vfneg_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs, size_t vl) { +vfloat16mf4_t test_vfneg_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs, size_t vl) { return __riscv_vfneg_v_f16mf4_tumu(vm, vd, vs, vl); } -vfloat16mf2_t test_vfneg_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs, size_t vl) { +vfloat16mf2_t test_vfneg_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs, size_t vl) { return __riscv_vfneg_v_f16mf2_tumu(vm, vd, vs, vl); } -vfloat16m1_t test_vfneg_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs, size_t vl) { +vfloat16m1_t test_vfneg_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs, size_t vl) { return __riscv_vfneg_v_f16m1_tumu(vm, vd, vs, vl); } -vfloat16m2_t test_vfneg_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs, size_t vl) { +vfloat16m2_t test_vfneg_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs, size_t vl) { return __riscv_vfneg_v_f16m2_tumu(vm, vd, vs, vl); } -vfloat16m4_t test_vfneg_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs, size_t vl) { +vfloat16m4_t test_vfneg_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs, size_t vl) { return __riscv_vfneg_v_f16m4_tumu(vm, vd, vs, vl); } -vfloat16m8_t test_vfneg_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs, size_t vl) { +vfloat16m8_t test_vfneg_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs, size_t vl) { return __riscv_vfneg_v_f16m8_tumu(vm, vd, vs, vl); } -vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs, size_t 
vl) { +vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs, size_t vl) { return __riscv_vfneg_v_f32mf2_tumu(vm, vd, vs, vl); } -vfloat32m1_t test_vfneg_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs, size_t vl) { +vfloat32m1_t test_vfneg_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs, size_t vl) { return __riscv_vfneg_v_f32m1_tumu(vm, vd, vs, vl); } -vfloat32m2_t test_vfneg_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs, size_t vl) { +vfloat32m2_t test_vfneg_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs, size_t vl) { return __riscv_vfneg_v_f32m2_tumu(vm, vd, vs, vl); } -vfloat32m4_t test_vfneg_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs, size_t vl) { +vfloat32m4_t test_vfneg_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs, size_t vl) { return __riscv_vfneg_v_f32m4_tumu(vm, vd, vs, vl); } -vfloat32m8_t test_vfneg_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs, size_t vl) { +vfloat32m8_t test_vfneg_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs, size_t vl) { return __riscv_vfneg_v_f32m8_tumu(vm, vd, vs, vl); } -vfloat64m1_t test_vfneg_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs, size_t vl) { +vfloat64m1_t test_vfneg_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs, size_t vl) { return __riscv_vfneg_v_f64m1_tumu(vm, vd, vs, vl); } -vfloat64m2_t test_vfneg_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs, size_t vl) { +vfloat64m2_t test_vfneg_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs, size_t vl) { return __riscv_vfneg_v_f64m2_tumu(vm, vd, vs, vl); } -vfloat64m4_t test_vfneg_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs, size_t vl) { +vfloat64m4_t test_vfneg_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs, size_t vl) { return __riscv_vfneg_v_f64m4_tumu(vm, vd, vs, vl); } -vfloat64m8_t test_vfneg_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs, size_t vl) { +vfloat64m8_t test_vfneg_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs, size_t vl) { return __riscv_vfneg_v_f64m8_tumu(vm, vd, vs, vl); } -vfloat16mf4_t test_vfneg_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs, size_t vl) { +vfloat16mf4_t test_vfneg_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs, size_t vl) { return __riscv_vfneg_v_f16mf4_mu(vm, vd, vs, vl); } -vfloat16mf2_t test_vfneg_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs, size_t vl) { +vfloat16mf2_t test_vfneg_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs, size_t vl) { return __riscv_vfneg_v_f16mf2_mu(vm, vd, vs, vl); } -vfloat16m1_t test_vfneg_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs, size_t vl) { +vfloat16m1_t test_vfneg_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs, size_t vl) { return __riscv_vfneg_v_f16m1_mu(vm, vd, vs, vl); } -vfloat16m2_t test_vfneg_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs, size_t vl) { +vfloat16m2_t test_vfneg_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs, size_t vl) { return __riscv_vfneg_v_f16m2_mu(vm, vd, vs, vl); } -vfloat16m4_t test_vfneg_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs, size_t vl) { +vfloat16m4_t test_vfneg_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs, size_t vl) { return __riscv_vfneg_v_f16m4_mu(vm, vd, vs, vl); } -vfloat16m8_t test_vfneg_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs, size_t vl) { +vfloat16m8_t 
test_vfneg_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs, size_t vl) { return __riscv_vfneg_v_f16m8_mu(vm, vd, vs, vl); } -vfloat32mf2_t test_vfneg_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs, size_t vl) { +vfloat32mf2_t test_vfneg_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs, size_t vl) { return __riscv_vfneg_v_f32mf2_mu(vm, vd, vs, vl); } -vfloat32m1_t test_vfneg_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs, size_t vl) { +vfloat32m1_t test_vfneg_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs, size_t vl) { return __riscv_vfneg_v_f32m1_mu(vm, vd, vs, vl); } -vfloat32m2_t test_vfneg_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs, size_t vl) { +vfloat32m2_t test_vfneg_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs, size_t vl) { return __riscv_vfneg_v_f32m2_mu(vm, vd, vs, vl); } -vfloat32m4_t test_vfneg_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs, size_t vl) { +vfloat32m4_t test_vfneg_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs, size_t vl) { return __riscv_vfneg_v_f32m4_mu(vm, vd, vs, vl); } -vfloat32m8_t test_vfneg_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs, size_t vl) { +vfloat32m8_t test_vfneg_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs, size_t vl) { return __riscv_vfneg_v_f32m8_mu(vm, vd, vs, vl); } -vfloat64m1_t test_vfneg_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs, size_t vl) { +vfloat64m1_t test_vfneg_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs, size_t vl) { return __riscv_vfneg_v_f64m1_mu(vm, vd, vs, vl); } -vfloat64m2_t test_vfneg_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs, size_t vl) { +vfloat64m2_t test_vfneg_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs, size_t vl) { return __riscv_vfneg_v_f64m2_mu(vm, vd, vs, vl); } -vfloat64m4_t test_vfneg_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs, size_t vl) { +vfloat64m4_t test_vfneg_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs, size_t vl) { return __riscv_vfneg_v_f64m4_mu(vm, vd, vs, vl); } -vfloat64m8_t test_vfneg_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs, size_t vl) { +vfloat64m8_t test_vfneg_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs, size_t vl) { return __riscv_vfneg_v_f64m8_mu(vm, vd, vs, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfnmacc.c b/auto-generated/policy_funcs/llvm-api-tests/vfnmacc.c index 23fde2e4c..f46242dd1 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfnmacc.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfnmacc.c @@ -1,967 +1,1408 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfnmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16mf4_tu(vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f16mf4_tu(vd, rs1, vs2, vl); }
-vfloat16mf2_t test_vfnmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16mf2_tu(vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f16mf2_tu(vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m1_tu(vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f16m1_tu(vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m2_tu(vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f16m2_tu(vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m4_tu(vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f16m4_tu(vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m8_tu(vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f16m8_tu(vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32mf2_tu(vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, 
size_t vl) { +vfloat32m1_t test_vfnmacc_vf_f32m1_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m1_tu(vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vf_f32m2_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m2_tu(vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vf_f32m4_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m4_tu(vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32m8_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vf_f32m8_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m8_tu(vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vf_f64m1_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m1_tu(vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vf_f64m2_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m2_tu(vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vf_f64m4_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m4_tu(vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return 
__riscv_vfnmacc_vv_f64m8_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vf_f64m8_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m8_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16mf4_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16mf4_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m1_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m1_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m2_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m4_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m4_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t 
vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m8_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m8_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m1_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m4_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m8_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vv_f64m1_tum(vbool64_t vm, 
vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t 
vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32m1_tumu(vm, vd, rs1, vs2, 
vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { 
return __riscv_vfnmacc_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16mf4_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vf_f16m4_mu(vbool4_t vm, 
vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m8_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vf_f32m8_mu(vbool4_t vm, 
vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m8_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m1_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m2_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m4_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m8_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16mf4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f16mf4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tu(vfloat16mf2_t vd, 
_Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f16mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f16m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f16m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f16m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f16m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return 
__riscv_vfnmacc_vf_f32m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f32m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t 
test_vfnmacc_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmacc_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmacc_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, 
vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, 
size_t vl) { +vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, 
vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfnmacc_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmacc_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfnmacc_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmacc_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { + return 
__riscv_vfnmacc_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfnmacc_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmacc_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { + return __riscv_vfnmacc_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmacc_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfnmacc_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return 
__riscv_vfnmacc_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfnmacc_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfnmacc_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfnmacc_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { + return __riscv_vfnmacc_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { + return __riscv_vfnmacc_vv_f64m2_rm_tumu(vm, vd, vs1, 
vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { + return __riscv_vfnmacc_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { + return __riscv_vfnmacc_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmacc_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { + return __riscv_vfnmacc_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmacc_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmacc_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { 
+vfloat16m1_t test_vfnmacc_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmacc_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmacc_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmacc_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmacc_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmacc_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmacc_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmacc_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmacc_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, 
size_t vl) { +vfloat32m1_t test_vfnmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t 
test_vfnmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmacc_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfnmadd.c b/auto-generated/policy_funcs/llvm-api-tests/vfnmadd.c index 06bc673a9..9c36ecf23 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfnmadd.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfnmadd.c @@ -1,967 +1,1408 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfnmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16mf4_tu(vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f16mf4_tu(vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16mf2_tu(vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f16mf2_tu(vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16m1_tu(vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f16m1_tu(vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t
test_vfnmadd_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16m2_tu(vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f16m2_tu(vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16m4_tu(vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f16m4_tu(vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16m8_tu(vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f16m8_tu(vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32mf2_tu(vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vf_f32m1_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m1_tu(vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vf_f32m2_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m2_tu(vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vf_f32m4_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return 
__riscv_vfnmadd_vf_f32m4_tu(vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32m8_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vf_f32m8_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m8_tu(vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vf_f64m1_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m1_tu(vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vf_f64m2_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m2_tu(vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vf_f64m4_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m4_tu(vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f64m8_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vf_f64m8_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m8_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16mf4_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16mf4_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return 
__riscv_vfnmadd_vv_f16mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m1_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m1_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m2_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m4_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m4_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m8_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m8_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vv_f32m1_tum(vbool32_t vm, 
vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m1_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m4_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m8_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t 
test_vfnmadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t 
test_vfnmadd_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { 
return __riscv_vfnmadd_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16mf4_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vf_f16mf4_mu(vbool64_t 
vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m8_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { 
+vfloat32mf2_t test_vfnmadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m8_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m1_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t 
test_vfnmadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m2_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m4_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m8_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16mf4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f16mf4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f16mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f16m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t 
test_vfnmadd_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f16m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f16m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f16m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, 
size_t vl) { return __riscv_vfnmadd_vf_f32m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f32m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmadd_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmadd_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return 
__riscv_vfnmadd_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t 
test_vfnmadd_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return 
__riscv_vfnmadd_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t 
test_vfnmadd_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmadd_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfnmadd_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmadd_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmadd_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfnmadd_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmadd_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfnmadd_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmadd_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t 
test_vfnmadd_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { + return __riscv_vfnmadd_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmadd_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfnmadd_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfnmadd_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfnmadd_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t 
test_vfnmadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfnmadd_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { + return __riscv_vfnmadd_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { + return __riscv_vfnmadd_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { + return __riscv_vfnmadd_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmadd_vf_f64m4_rm_tumu(vbool16_t vm, 
vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { + return __riscv_vfnmadd_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmadd_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { + return __riscv_vfnmadd_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmadd_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmadd_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmadd_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmadd_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmadd_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmadd_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return 
__riscv_vfnmadd_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmadd_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmadd_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmadd_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmadd_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { 
return __riscv_vfnmadd_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmadd_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmadd_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return 
__riscv_vfnmadd_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfnmsac.c b/auto-generated/policy_funcs/llvm-api-tests/vfnmsac.c index b8fa9d237..ab6eddd87 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfnmsac.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfnmsac.c @@ -1,967 +1,1408 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfnmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16mf4_tu(vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f16mf4_tu(vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16mf2_tu(vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f16mf2_tu(vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16m1_tu(vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f16m1_tu(vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16m2_tu(vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f16m2_tu(vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16m4_tu(vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f16m4_tu(vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vv_f16m8_tu(vfloat16m8_t vd, 
vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16m8_tu(vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f16m8_tu(vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32mf2_tu(vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vf_f32m1_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m1_tu(vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vf_f32m2_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m2_tu(vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vf_f32m4_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m4_tu(vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32m8_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vf_f32m8_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m8_tu(vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vf_f64m1_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f64m1_tu(vd, rs1, vs2, vl); } -vfloat64m2_t 
test_vfnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vf_f64m2_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f64m2_tu(vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vf_f64m4_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f64m4_tu(vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f64m8_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vf_f64m8_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f64m8_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16mf4_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16mf4_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m1_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m1_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t 
test_vfnmsac_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m2_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m4_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m4_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m8_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m8_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m1_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, 
vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m4_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m8_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, 
vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return 
__riscv_vfnmsac_vf_f16m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vf_f64m1_tumu(vbool64_t vm, 
vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16mf4_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, 
vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m8_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t 
vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m8_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f64m1_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f64m2_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f64m4_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t 
vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f64m8_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16mf4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f16mf4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f16mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f16m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f16m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f16m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t 
test_vfnmsac_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f16m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f32m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t 
vl) { return __riscv_vfnmsac_vf_f64m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f64m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f64m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsac_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsac_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tum(vbool16_t 
vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t 
test_vfnmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { 
+vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { + return 
__riscv_vfnmsac_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsac_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfnmsac_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsac_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfnmsac_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsac_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { + return __riscv_vfnmsac_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsac_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return 
__riscv_vfnmsac_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfnmsac_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfnmsac_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfnmsac_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfnmsac_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f32m8_rm_tumu(vm, vd, rs1, 
vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { + return __riscv_vfnmsac_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { + return __riscv_vfnmsac_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { + return __riscv_vfnmsac_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { + return __riscv_vfnmsac_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsac_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { + return __riscv_vfnmsac_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + 
vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsac_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsac_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsac_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsac_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsac_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsac_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsac_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsac_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsac_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsac_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t 
test_vfnmsac_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t 
test_vfnmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsac_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfnmsub.c b/auto-generated/policy_funcs/llvm-api-tests/vfnmsub.c index acaf81233..8eea091af 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfnmsub.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfnmsub.c @@ -1,967 +1,1408 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfnmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16mf4_tu(vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vf_f16mf4_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2,
size_t vl) { return __riscv_vfnmsub_vf_f16mf4_tu(vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16mf2_tu(vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vf_f16mf2_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f16mf2_tu(vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16m1_tu(vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vf_f16m1_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f16m1_tu(vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16m2_tu(vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vf_f16m2_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f16m2_tu(vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16m4_tu(vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vf_f16m4_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f16m4_tu(vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16m8_tu(vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vf_f16m8_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f16m8_tu(vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32mf2_tu(vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m1_t 
test_vfnmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vf_f32m1_tu(vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m1_tu(vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vf_f32m2_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m2_tu(vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vf_f32m4_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m4_tu(vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32m8_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vf_f32m8_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m8_tu(vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vf_f64m1_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m1_tu(vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vf_f64m2_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m2_tu(vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vf_f64m4_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m4_tu(vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t 
vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f64m8_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vf_f64m8_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m8_tu(vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16mf4_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16mf4_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m1_tum(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m1_tum(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m2_tum(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m2_tum(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m4_tum(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m4_tum(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t 
test_vfnmsub_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m8_tum(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m8_tum(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32mf2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m1_tum(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m2_tum(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m4_tum(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m8_tum(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t 
vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m1_tum(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m2_tum(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m4_tum(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m8_tum(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16mf4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16mf4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t 
test_vfnmsub_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32mf2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + 
size_t vl) { return __riscv_vfnmsub_vf_f32m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m1_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m2_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vf_f64m4_tumu(vbool16_t vm, 
vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m4_tumu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m8_tumu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16mf4_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16mf4_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m1_mu(vm, vd, vs1, vs2, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m1_mu(vm, vd, rs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m2_mu(vm, vd, vs1, vs2, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m2_mu(vm, vd, rs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m4_mu(vm, vd, vs1, vs2, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t 
vl) { +vfloat16m4_t test_vfnmsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m4_mu(vm, vd, rs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m8_mu(vm, vd, vs1, vs2, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m8_mu(vm, vd, rs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32mf2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m1_mu(vm, vd, rs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m2_mu(vm, vd, rs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m4_mu(vm, vd, rs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) 
{ +vfloat32m8_t test_vfnmsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m8_mu(vm, vd, rs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m1_mu(vm, vd, rs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m2_mu(vm, vd, rs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m4_mu(vm, vd, rs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m8_mu(vm, vd, rs1, vs2, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16mf4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tu(vfloat16mf4_t vd, _Float16 rs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f16mf4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { 
+vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tu(vfloat16mf2_t vd, _Float16 rs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f16mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tu(vfloat16m1_t vd, _Float16 rs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f16m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tu(vfloat16m2_t vd, _Float16 rs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f16m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tu(vfloat16m4_t vd, _Float16 rs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f16m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f16m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tu(vfloat16m8_t vd, _Float16 rs1, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f16m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, float rs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32mf2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tu(vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tu(vfloat32m1_t vd, 
float rs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tu(vfloat32m2_t vd, float rs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tu(vfloat32m4_t vd, float rs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m4_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tu(vfloat32m8_t vd, float rs1, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f32m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tu(vfloat64m1_t vd, double rs1, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m1_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tu(vfloat64m2_t vd, double rs1, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m2_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tu(vfloat64m4_t vd, double rs1, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m4_rm_tu(vd, rs1, vs2, 
__RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsub_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tu(vfloat64m8_t vd, double rs1, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfnmsub_vf_f64m8_rm_tu(vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f16mf4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f16mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return 
__riscv_vfnmsub_vf_f16m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f32mf2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t 
test_vfnmsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m1_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m2_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m4_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } 
-vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m8_rm_tum(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f16mf4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f16mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f16mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsub_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfnmsub_vv_f16m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfnmsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f16m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsub_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfnmsub_vv_f16m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m2_t test_vfnmsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + 
_Float16 rs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f16m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsub_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfnmsub_vv_f16m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m4_t test_vfnmsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f16m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsub_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { + return __riscv_vfnmsub_vv_f16m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat16m8_t test_vfnmsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f16m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f32mf2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfnmsub_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfnmsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f32m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t 
vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfnmsub_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfnmsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f32m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfnmsub_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfnmsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f32m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfnmsub_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfnmsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f32m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { + return __riscv_vfnmsub_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfnmsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f64m1_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { + return 
__riscv_vfnmsub_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfnmsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f64m2_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { + return __riscv_vfnmsub_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfnmsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f64m4_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { + return __riscv_vfnmsub_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { - return __riscv_vfnmsub_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfnmsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { + return __riscv_vfnmsub_vf_f64m8_rm_tumu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16mf4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfnmsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + _Float16 rs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16mf4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfnmsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + _Float16 rs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, 
vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfnmsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfnmsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + _Float16 rs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfnmsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfnmsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + _Float16 rs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfnmsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfnmsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + _Float16 rs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f16m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfnmsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfnmsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + _Float16 rs1, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f16m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, float rs1, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfnmsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + float rs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32mf2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfnmsub_vf_f32m1_rm_mu(vbool32_t vm, 
vfloat32m1_t vd, float rs1, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfnmsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + float rs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfnmsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, float rs1, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfnmsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + float rs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfnmsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, float rs1, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfnmsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + float rs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfnmsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, float rs1, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfnmsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + float rs1, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f32m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfnmsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, double rs1, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfnmsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + double rs1, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m1_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfnmsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, double rs1, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfnmsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + double rs1, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m2_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs1, 
vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfnmsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, double rs1, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfnmsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + double rs1, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m4_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfnmsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, double rs1, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfnmsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + double rs1, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfnmsub_vf_f64m8_rm_mu(vm, vd, rs1, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfrdiv.c b/auto-generated/policy_funcs/llvm-api-tests/vfrdiv.c index 2a601b0f5..df5edd680 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfrdiv.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfrdiv.c @@ -1,487 +1,680 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfrdiv_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrdiv_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrdiv_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrdiv_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrdiv_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrdiv_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrdiv_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrdiv_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrdiv_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrdiv_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrdiv_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrdiv_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrdiv_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tu(vfloat32mf2_t vd,
vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrdiv_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrdiv_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrdiv_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrdiv_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrdiv_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrdiv_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrdiv_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrdiv_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrdiv_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrdiv_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrdiv_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrdiv_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrdiv_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return 
__riscv_vfrdiv_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrdiv_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrdiv_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrdiv_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrdiv_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrdiv_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrdiv_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrdiv_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrdiv_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrdiv_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrdiv_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrdiv_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } 
-vfloat16m1_t test_vfrdiv_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrdiv_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrdiv_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrdiv_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrdiv_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrdiv_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrdiv_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrdiv_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrdiv_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrdiv_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrdiv_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrdiv_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrdiv_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t 
test_vfrdiv_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrdiv_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrdiv_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrdiv_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrdiv_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrdiv_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrdiv_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrdiv_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrdiv_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrdiv_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrdiv_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrdiv_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrdiv_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double 
rs1, size_t vl) { +vfloat64m1_t test_vfrdiv_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrdiv_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrdiv_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrdiv_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrdiv_vf_f16mf4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrdiv_vf_f16mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrdiv_vf_f16m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrdiv_vf_f16m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrdiv_vf_f16m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrdiv_vf_f16m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return 
__riscv_vfrdiv_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfrdiv_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfrdiv_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16mf4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + 
vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { - return __riscv_vfrdiv_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { + return __riscv_vfrdiv_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { - return __riscv_vfrdiv_vf_f16mf2_rm_tumu(vm, vd, 
vs2, rs1, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { + return __riscv_vfrdiv_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrdiv_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrdiv_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrdiv_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrdiv_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { - return __riscv_vfrdiv_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { + return __riscv_vfrdiv_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrdiv_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrdiv_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrdiv_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrdiv_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrdiv_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t 
test_vfrdiv_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrdiv_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrdiv_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrdiv_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrdiv_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16mf4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrdiv_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrdiv_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrdiv_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrdiv_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrdiv_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrdiv_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrdiv_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrdiv_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrdiv_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrdiv_vf_f16m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrdiv_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrdiv_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrdiv_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrdiv_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t 
vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrdiv_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrdiv_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrdiv_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrdiv_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrdiv_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfrdiv_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrdiv_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrdiv_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrdiv_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrdiv_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrdiv_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrdiv_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrdiv_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrdiv_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfrdiv_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfrec7.c b/auto-generated/policy_funcs/llvm-api-tests/vfrec7.c index fd69e548a..e544f1329 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfrec7.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfrec7.c @@ -1,487 +1,607 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfrec7_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfrec7_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfrec7_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfrec7_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfrec7_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfrec7_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t
test_vfrec7_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfrec7_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfrec7_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfrec7_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfrec7_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfrec7_v_f16m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfrec7_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfrec7_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfrec7_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfrec7_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfrec7_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfrec7_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfrec7_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfrec7_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfrec7_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfrec7_v_f32m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfrec7_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfrec7_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfrec7_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfrec7_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfrec7_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfrec7_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfrec7_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfrec7_v_f64m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfrec7_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfrec7_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfrec7_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfrec7_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfrec7_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfrec7_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return 
__riscv_vfrec7_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfrec7_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfrec7_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfrec7_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfrec7_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfrec7_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfrec7_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfrec7_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfrec7_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfrec7_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfrec7_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfrec7_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfrec7_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfrec7_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfrec7_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfrec7_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfrec7_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfrec7_v_f16m1_tumu(vbool16_t vm, 
vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfrec7_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfrec7_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfrec7_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfrec7_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfrec7_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfrec7_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfrec7_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfrec7_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfrec7_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfrec7_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfrec7_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfrec7_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfrec7_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfrec7_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfrec7_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, 
vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfrec7_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfrec7_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfrec7_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfrec7_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfrec7_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m4_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfrec7_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfrec7_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfrec7_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfrec7_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfrec7_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfrec7_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfrec7_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfrec7_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfrec7_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfrec7_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfrec7_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m8_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t 
vs2, size_t vl) { +vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfrec7_v_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfrec7_v_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfrec7_v_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfrec7_v_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfrec7_v_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfrec7_v_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfrec7_v_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfrec7_v_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfrec7_v_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfrec7_v_f16m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfrec7_v_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfrec7_v_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfrec7_v_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfrec7_v_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfrec7_v_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfrec7_v_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfrec7_v_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfrec7_v_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfrec7_v_f32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfrec7_v_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfrec7_v_f64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfrec7_v_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfrec7_v_f64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfrec7_v_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfrec7_v_f64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfrec7_v_f64m8_rm_tu(vfloat64m8_t 
vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfrec7_v_f64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfrec7_v_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfrec7_v_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfrec7_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfrec7_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfrec7_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfrec7_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfrec7_v_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfrec7_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfrec7_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfrec7_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfrec7_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfrec7_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t 
test_vfrec7_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfrec7_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfrec7_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfrec7_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfrec7_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfrec7_v_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfrec7_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfrec7_v_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfrec7_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfrec7_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfrec7_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfrec7_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfrec7_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfrec7_v_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrec7_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfrec7_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfrec7_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t 
test_vfrec7_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfrec7_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfrec7_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfrec7_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfrec7_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfrec7_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrec7_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfrec7_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfrec7_v_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrec7_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfrec7_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfrec7_v_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrec7_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfrec7_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrec7_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfrec7_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrec7_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfrec7_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrec7_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfrec7_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f16m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrec7_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfrec7_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfrec7_v_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, 
vl); } -vfloat32m1_t test_vfrec7_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfrec7_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrec7_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfrec7_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrec7_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfrec7_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrec7_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfrec7_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrec7_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfrec7_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrec7_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfrec7_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrec7_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfrec7_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrec7_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfrec7_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfrec7_v_f64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfredmax.c b/auto-generated/policy_funcs/llvm-api-tests/vfredmax.c index 6faade2eb..fdd9e5313 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfredmax.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfredmax.c @@ -1,127 +1,175 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, + vfloat16mf4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f16mf4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, + vfloat16mf2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f16mf2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2,
vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f16m1_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f16m2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f16m4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f16m8_f16m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, + vfloat32mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f32mf2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f32m1_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f32m2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f32m4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f32m8_f32m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f64m1_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f64m2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f64m4_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t 
test_vfredmax_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f64m8_f64m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, + vfloat16mf4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f16mf4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, + vfloat16mf2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f16mf2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f16m1_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, + vfloat16m2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f16m2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, + vfloat16m4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f16m4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, + vfloat16m8_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f16m8_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, + vfloat32mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f32mf2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f32m1_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, + vfloat32m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f32m2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, + vfloat32m4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f32m4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) 
{ +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, + vfloat32m8_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f32m8_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f64m1_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, + vfloat64m2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f64m2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, + vfloat64m4_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f64m4_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, + vfloat64m8_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmax_vs_f64m8_f64m1_tum(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfredmin.c b/auto-generated/policy_funcs/llvm-api-tests/vfredmin.c index 1339dc706..a8c9303de 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfredmin.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfredmin.c @@ -1,127 +1,175 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, + vfloat16mf4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f16mf4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, + vfloat16mf2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f16mf2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f16m1_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f16m2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, + vfloat16m1_t vs1, size_t vl) { return
__riscv_vfredmin_vs_f16m4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f16m8_f16m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, + vfloat32mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f32mf2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f32m1_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f32m2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f32m4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f32m8_f32m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f64m1_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f64m2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f64m4_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f64m8_f64m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, + vfloat16mf4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f16mf4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, + vfloat16mf2_t vs2, + vfloat16m1_t vs1, size_t vl) { return 
__riscv_vfredmin_vs_f16mf2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f16m1_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, + vfloat16m2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f16m2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, + vfloat16m4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f16m4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, + vfloat16m8_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f16m8_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, + vfloat32mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f32mf2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f32m1_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, + vfloat32m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f32m2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, + vfloat32m4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f32m4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, + vfloat32m8_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f32m8_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f64m1_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, + vfloat64m2_t vs2, + 
vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f64m2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, + vfloat64m4_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f64m4_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, + vfloat64m8_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredmin_vs_f64m8_f64m1_tum(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfredosum.c b/auto-generated/policy_funcs/llvm-api-tests/vfredosum.c index ea5d417fe..f3f53dedf 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfredosum.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfredosum.c @@ -1,247 +1,391 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, + vfloat16mf4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f16mf4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, + vfloat16mf2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f16mf2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f16m1_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f16m2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f16m4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f16m8_f16m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, + vfloat32mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f32mf2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t
vs1, size_t vl) { +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f32m1_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f32m2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f32m4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f32m8_f32m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f64m1_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f64m2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f64m4_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f64m8_f64m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, + vfloat16mf4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f16mf4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, + vfloat16mf2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f16mf2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f16m1_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, + vfloat16m2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f16m2_f16m1_tum(vm, vd, vs2, 
vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, + vfloat16m4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f16m4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, + vfloat16m8_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f16m8_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, + vfloat32mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f32mf2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f32m1_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, + vfloat32m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f32m2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, + vfloat32m4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f32m4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, + vfloat32m8_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f32m8_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f64m1_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, + vfloat64m2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f64m2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, + vfloat64m4_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredosum_vs_f64m4_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, + vfloat64m8_t vs2, + vfloat64m1_t vs1, size_t vl) 
{ return __riscv_vfredosum_vs_f64m8_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f16mf4_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t vd, + vfloat16mf4_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16mf4_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f16mf2_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t vd, + vfloat16mf2_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16mf2_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f16m1_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t vd, + vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m1_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f16m2_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t vd, + vfloat16m2_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m2_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f16m4_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t vd, + vfloat16m4_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m4_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f16m8_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t vd, + vfloat16m8_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m8_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f32mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f32m1_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m1_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f32m2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); 
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m2_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f32m4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m4_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f32m8_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m8_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m8_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f64m1_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t vd, + vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m1_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f64m2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t vd, + vfloat64m2_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f64m4_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t vd, + vfloat64m4_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m4_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f64m8_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t vd, + vfloat64m8_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m8_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f16mf4_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_rm_tum(vbool64_t vm, + vfloat16m1_t vd, + vfloat16mf4_t vs2, + vfloat16m1_t vs1, + size_t vl) { + return __riscv_vfredosum_vs_f16mf4_f16m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f16mf2_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_rm_tum(vbool32_t vm, + vfloat16m1_t vd, + vfloat16mf2_t vs2, + vfloat16m1_t vs1, + size_t vl) { + return __riscv_vfredosum_vs_f16mf2_f16m1_rm_tum(vm, vd, vs2, vs1, + 
__RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f16m1_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m1_f16m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f16m2_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_rm_tum(vbool8_t vm, vfloat16m1_t vd, + vfloat16m2_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m2_f16m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f16m4_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_rm_tum(vbool4_t vm, vfloat16m1_t vd, + vfloat16m4_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m4_f16m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f16m8_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_rm_tum(vbool2_t vm, vfloat16m1_t vd, + vfloat16m8_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f16m8_f16m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f32mf2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_rm_tum(vbool64_t vm, + vfloat32m1_t vd, + vfloat32mf2_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfredosum_vs_f32mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f32m1_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m1_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f32m2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, + vfloat32m2_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f32m4_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, + vfloat32m4_t vs2, + vfloat32m1_t vs1, size_t vl) { + return 
__riscv_vfredosum_vs_f32m4_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f32m8_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, + vfloat32m8_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f32m8_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f64m1_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m1_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f64m2_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, + vfloat64m2_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m2_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f64m4_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, + vfloat64m4_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m4_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredosum_vs_f64m8_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, + vfloat64m8_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredosum_vs_f64m8_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfredusum.c b/auto-generated/policy_funcs/llvm-api-tests/vfredusum.c index a06a976d6..fbee58ecc 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfredusum.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfredusum.c @@ -1,247 +1,391 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tu(vfloat16m1_t vd, + vfloat16mf4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f16mf4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tu(vfloat16m1_t vd, + vfloat16mf2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f16mf2_f16m1_tu(vd, vs2,
vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f16m1_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f16m2_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tu(vfloat16m1_t vd, vfloat16m4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f16m4_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tu(vfloat16m1_t vd, vfloat16m8_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f16m8_f16m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tu(vfloat32m1_t vd, + vfloat32mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f32mf2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f32m1_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f32m2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tu(vfloat32m1_t vd, vfloat32m4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f32m4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tu(vfloat32m1_t vd, vfloat32m8_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f32m8_f32m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f64m1_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f64m2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tu(vfloat64m1_t vd, vfloat64m4_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f64m4_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t 
test_vfredusum_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tu(vfloat64m1_t vd, vfloat64m8_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f64m8_f64m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_tum(vbool64_t vm, vfloat16m1_t vd, + vfloat16mf4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f16mf4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_tum(vbool32_t vm, vfloat16m1_t vd, + vfloat16mf2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f16mf2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f16m1_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_tum(vbool8_t vm, vfloat16m1_t vd, + vfloat16m2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f16m2_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_tum(vbool4_t vm, vfloat16m1_t vd, + vfloat16m4_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f16m4_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_tum(vbool2_t vm, vfloat16m1_t vd, + vfloat16m8_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f16m8_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, + vfloat32mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f32mf2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f32m1_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, + vfloat32m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f32m2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, + vfloat32m4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f32m4_f32m1_tum(vm, 
vd, vs2, vs1, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, + vfloat32m8_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f32m8_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f64m1_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, + vfloat64m2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f64m2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, + vfloat64m4_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f64m4_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, + vfloat64m8_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfredusum_vs_f64m8_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f16mf4_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_tu(vfloat16m1_t vd, + vfloat16mf4_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16mf4_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f16mf2_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_tu(vfloat16m1_t vd, + vfloat16mf2_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16mf2_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f16m1_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_tu(vfloat16m1_t vd, + vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m1_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f16m2_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_tu(vfloat16m1_t vd, + vfloat16m2_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m2_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f16m4_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t 
test_vfredusum_vs_f16m4_f16m1_rm_tu(vfloat16m1_t vd, + vfloat16m4_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m4_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f16m8_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_tu(vfloat16m1_t vd, + vfloat16m8_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m8_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f32mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f32m1_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m1_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f32m2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m2_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f32m4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m4_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f32m8_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m8_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m8_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f64m1_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_tu(vfloat64m1_t vd, + vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m1_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f64m2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_tu(vfloat64m1_t vd, + vfloat64m2_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m4_t vs2, 
vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f64m4_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_tu(vfloat64m1_t vd, + vfloat64m4_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m4_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f64m8_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_tu(vfloat64m1_t vd, + vfloat64m8_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m8_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_tum(vbool64_t vm, vfloat16m1_t vd, vfloat16mf4_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f16mf4_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_rm_tum(vbool64_t vm, + vfloat16m1_t vd, + vfloat16mf4_t vs2, + vfloat16m1_t vs1, + size_t vl) { + return __riscv_vfredusum_vs_f16mf4_f16m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_tum(vbool32_t vm, vfloat16m1_t vd, vfloat16mf2_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f16mf2_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_rm_tum(vbool32_t vm, + vfloat16m1_t vd, + vfloat16mf2_t vs2, + vfloat16m1_t vs1, + size_t vl) { + return __riscv_vfredusum_vs_f16mf2_f16m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f16m1_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m1_f16m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_tum(vbool8_t vm, vfloat16m1_t vd, vfloat16m2_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f16m2_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_rm_tum(vbool8_t vm, vfloat16m1_t vd, + vfloat16m2_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m2_f16m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_tum(vbool4_t vm, vfloat16m1_t vd, vfloat16m4_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f16m4_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_rm_tum(vbool4_t vm, vfloat16m1_t vd, + vfloat16m4_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m4_f16m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_tum(vbool2_t vm, vfloat16m1_t vd, vfloat16m8_t vs2, vfloat16m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f16m8_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_rm_tum(vbool2_t vm, vfloat16m1_t vd, + vfloat16m8_t vs2, + vfloat16m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f16m8_f16m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd, vfloat32mf2_t vs2, vfloat32m1_t vs1, 
size_t vl) { - return __riscv_vfredusum_vs_f32mf2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_rm_tum(vbool64_t vm, + vfloat32m1_t vd, + vfloat32mf2_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfredusum_vs_f32mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f32m1_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m1_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, vfloat32m2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f32m2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, + vfloat32m2_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, vfloat32m4_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f32m4_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, + vfloat32m4_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m4_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, vfloat32m8_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f32m8_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, + vfloat32m8_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f32m8_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f64m1_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m1_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, vfloat64m2_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f64m2_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, + vfloat64m2_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m2_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, vfloat64m4_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f64m4_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, + vfloat64m4_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m4_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, 
vfloat64m8_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfredusum_vs_f64m8_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, + vfloat64m8_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfredusum_vs_f64m8_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfrsqrt7.c b/auto-generated/policy_funcs/llvm-api-tests/vfrsqrt7.c index d3cf59cb6..626069c1a 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfrsqrt7.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfrsqrt7.c @@ -1,247 +1,307 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfrsqrt7_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfrsqrt7_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfrsqrt7_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfrsqrt7_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f16m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfrsqrt7_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfrsqrt7_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfrsqrt7_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfrsqrt7_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f32m8_tu(vd, vs2, vl); } -vfloat64m1_t
test_vfrsqrt7_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfrsqrt7_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfrsqrt7_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfrsqrt7_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfrsqrt7_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfrsqrt7_v_f64m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfrsqrt7_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfrsqrt7_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfrsqrt7_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfrsqrt7_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfrsqrt7_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfrsqrt7_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfrsqrt7_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t 
test_vfrsqrt7_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfrsqrt7_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfrsqrt7_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfrsqrt7_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfrsqrt7_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfrsqrt7_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfrsqrt7_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfrsqrt7_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfrsqrt7_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfrsqrt7_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfrsqrt7_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t 
test_vfrsqrt7_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfrsqrt7_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfrsqrt7_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfrsqrt7_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfrsqrt7_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfrsqrt7_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfrsqrt7_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfrsqrt7_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfrsqrt7_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfrsqrt7_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfrsqrt7_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfrsqrt7_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfrsqrt7_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfrsqrt7_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfrsqrt7_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfrsqrt7_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfrsqrt7_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16m4_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfrsqrt7_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfrsqrt7_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f16m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfrsqrt7_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfrsqrt7_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32mf2_mu(vm, vd, 
vs2, vl); } -vfloat32m1_t test_vfrsqrt7_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfrsqrt7_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfrsqrt7_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfrsqrt7_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfrsqrt7_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfrsqrt7_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfrsqrt7_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfrsqrt7_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f32m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfrsqrt7_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfrsqrt7_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfrsqrt7_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfrsqrt7_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfrsqrt7_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfrsqrt7_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfrsqrt7_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfrsqrt7_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfrsqrt7_v_f64m8_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfrsub.c b/auto-generated/policy_funcs/llvm-api-tests/vfrsub.c index 4d4f7412e..3e4f103cb 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfrsub.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfrsub.c @@ -1,487 +1,680 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfrsub_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrsub_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrsub_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrsub_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrsub_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrsub_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrsub_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_tu(vfloat16m2_t
vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrsub_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrsub_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrsub_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrsub_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrsub_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrsub_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrsub_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfrsub_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrsub_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrsub_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrsub_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrsub_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrsub_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrsub_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrsub_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrsub_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return 
__riscv_vfrsub_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t 
test_vfrsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t 
test_vfrsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_mu(vbool16_t vm, 
vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrsub_vf_f16mf4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrsub_vf_f16mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrsub_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrsub_vf_f16m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrsub_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrsub_vf_f16m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrsub_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfrsub_vf_f16m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrsub_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 
rs1, size_t vl) { return __riscv_vfrsub_vf_f16m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfrsub_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrsub_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrsub_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrsub_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrsub_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfrsub_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrsub_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrsub_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrsub_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrsub_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfrsub_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16mf4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m1_rm_tum(vm, vd, vs2, 
rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t 
test_vfrsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { - return __riscv_vfrsub_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { + return __riscv_vfrsub_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { - return __riscv_vfrsub_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { + return __riscv_vfrsub_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfrsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { - return __riscv_vfrsub_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { + return __riscv_vfrsub_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t 
vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfrsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16mf4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfrsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfrsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfrsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfrsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfrsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfrsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfrsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfrsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t 
test_vfrsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfrsub_vf_f16m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfrsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfrsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfrsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfrsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfrsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfrsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfrsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfrsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfrsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfrsub_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfrsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfrsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfrsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfrsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfrsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfrsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfrsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfrsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfrsub_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfsgnj.c b/auto-generated/policy_funcs/llvm-api-tests/vfsgnj.c index ad234465b..a8b414d8e 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfsgnj.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfsgnj.c @@ -1,487 +1,677 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S 
-passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfsgnj_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsgnj_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnj_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsgnj_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnj_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnj_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsgnj_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnj_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsgnj_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnj_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnj_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsgnj_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnj_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsgnj_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnj_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnj_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsgnj_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnj_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsgnj_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnj_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnj_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsgnj_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnj_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsgnj_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnj_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnj_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsgnj_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnj_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsgnj_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnj_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnj_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t
test_vfsgnj_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnj_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsgnj_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnj_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsgnj_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnj_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsgnj_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnj_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsgnj_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnj_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsgnj_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnj_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsgnj_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnj_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsgnj_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnj_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsgnj_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnj_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsgnj_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnj_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsgnj_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfsgnj_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnj_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsgnj_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnj_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsgnj_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfsgnj_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnj_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsgnj_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { 
return __riscv_vfsgnj_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnj_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsgnj_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfsgnj_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnj_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsgnj_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfsgnj_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnj_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsgnj_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfsgnj_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnj_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsgnj_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnj_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsgnj_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnj_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsgnj_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnj_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsgnj_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnj_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsgnj_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnj_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsgnj_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnj_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsgnj_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnj_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsgnj_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnj_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsgnj_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } 
-vfloat16m4_t test_vfsgnj_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsgnj_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnj_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsgnj_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnj_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsgnj_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnj_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnj_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnj_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnj_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsgnj_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnj_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsgnj_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnj_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsgnj_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnj_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsgnj_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnj_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsgnj_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnj_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsgnj_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnj_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsgnj_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } 
-vfloat32m8_t test_vfsgnj_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsgnj_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnj_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsgnj_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnj_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsgnj_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfsgnj_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnj_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsgnj_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnj_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsgnj_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfsgnj_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnj_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsgnj_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnj_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsgnj_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfsgnj_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnj_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsgnj_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnj_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsgnj_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfsgnj_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnj_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsgnj_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnj_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsgnj_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnj_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsgnj_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return 
__riscv_vfsgnj_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnj_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsgnj_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnj_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsgnj_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnj_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsgnj_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnj_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsgnj_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnj_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsgnj_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnj_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsgnj_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnj_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsgnj_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnj_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsgnj_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnj_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsgnj_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnj_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsgnj_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnj_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsgnj_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnj_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnj_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsgnj_vv_f32m1_tumu(vbool32_t vm, 
vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnj_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsgnj_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnj_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsgnj_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnj_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsgnj_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnj_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsgnj_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnj_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsgnj_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnj_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsgnj_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnj_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsgnj_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnj_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsgnj_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnj_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsgnj_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnj_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnj_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsgnj_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnj_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsgnj_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnj_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnj_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t 
test_vfsgnj_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnj_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsgnj_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnj_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnj_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsgnj_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnj_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsgnj_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnj_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnj_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsgnj_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnj_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsgnj_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnj_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsgnj_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnj_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsgnj_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnj_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsgnj_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnj_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsgnj_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnj_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsgnj_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnj_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsgnj_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnj_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t 
vs1, size_t vl) { +vfloat16m4_t test_vfsgnj_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnj_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsgnj_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnj_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsgnj_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnj_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsgnj_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnj_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnj_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsgnj_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnj_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsgnj_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnj_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnj_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsgnj_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnj_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsgnj_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnj_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsgnj_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnj_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsgnj_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnj_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsgnj_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnj_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsgnj_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnj_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t 
test_vfsgnj_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnj_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsgnj_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfsgnj_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnj_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsgnj_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnj_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsgnj_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfsgnj_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnj_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsgnj_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnj_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsgnj_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfsgnj_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnj_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsgnj_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnj_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsgnj_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfsgnj_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnj_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsgnj_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsgnj_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnj_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsgnj_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfsgnj_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfsgnjn.c b/auto-generated/policy_funcs/llvm-api-tests/vfsgnjn.c index 2bfd70a74..7e59fa644 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfsgnjn.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfsgnjn.c @@ -1,487 +1,685 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t
test_vfsgnjn_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnjn_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnjn_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsgnjn_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsgnjn_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnjn_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsgnjn_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsgnjn_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnjn_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsgnjn_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsgnjn_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnjn_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjn_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsgnjn_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsgnjn_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnjn_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, 
size_t vl) { return __riscv_vfsgnjn_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsgnjn_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsgnjn_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnjn_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsgnjn_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsgnjn_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnjn_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsgnjn_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsgnjn_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnjn_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsgnjn_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsgnjn_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnjn_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsgnjn_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsgnjn_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfsgnjn_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsgnjn_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsgnjn_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfsgnjn_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsgnjn_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double 
rs1, size_t vl) { +vfloat64m4_t test_vfsgnjn_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfsgnjn_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsgnjn_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfsgnjn_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsgnjn_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfsgnjn_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsgnjn_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsgnjn_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsgnjn_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsgnjn_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsgnjn_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t 
vl) { +vfloat16m4_t test_vfsgnjn_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjn_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsgnjn_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsgnjn_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsgnjn_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsgnjn_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjn_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsgnjn_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsgnjn_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjn_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsgnjn_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsgnjn_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjn_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsgnjn_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, 
vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsgnjn_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjn_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsgnjn_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsgnjn_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsgnjn_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsgnjn_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsgnjn_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsgnjn_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsgnjn_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsgnjn_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } 
-vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsgnjn_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsgnjn_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsgnjn_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsgnjn_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsgnjn_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsgnjn_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjn_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsgnjn_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsgnjn_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsgnjn_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + 
vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsgnjn_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsgnjn_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsgnjn_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsgnjn_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsgnjn_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsgnjn_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsgnjn_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsgnjn_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsgnjn_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsgnjn_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsgnjn_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t 
test_vfsgnjn_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsgnjn_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsgnjn_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsgnjn_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjn_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsgnjn_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjn_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsgnjn_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjn_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsgnjn_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjn_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsgnjn_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjn_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsgnjn_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjn_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsgnjn_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjn_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsgnjn_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjn_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsgnjn_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjn_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t 
vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsgnjn_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjn_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsgnjn_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjn_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsgnjn_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjn_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsgnjn_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjn_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsgnjn_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjn_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsgnjn_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjn_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjn_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsgnjn_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjn_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsgnjn_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjn_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjn_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsgnjn_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjn_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsgnjn_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjn_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjn_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsgnjn_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjn_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsgnjn_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjn_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjn_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t 
vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsgnjn_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjn_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsgnjn_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjn_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjn_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsgnjn_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjn_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsgnjn_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfsgnjn_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjn_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsgnjn_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjn_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsgnjn_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfsgnjn_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjn_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsgnjn_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjn_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsgnjn_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfsgnjn_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjn_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsgnjn_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsgnjn_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjn_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsgnjn_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfsgnjn_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfsgnjx.c b/auto-generated/policy_funcs/llvm-api-tests/vfsgnjx.c index 581eb128b..71f8a522a 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfsgnjx.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfsgnjx.c @@ -1,487 +1,685 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t
test_vfsgnjx_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnjx_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnjx_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsgnjx_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsgnjx_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnjx_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsgnjx_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsgnjx_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnjx_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsgnjx_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsgnjx_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnjx_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsgnjx_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsgnjx_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsgnjx_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, 
float rs1, size_t vl) { +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnjx_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsgnjx_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsgnjx_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnjx_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsgnjx_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsgnjx_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnjx_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsgnjx_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsgnjx_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnjx_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsgnjx_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsgnjx_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfsgnjx_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsgnjx_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsgnjx_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfsgnjx_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsgnjx_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsgnjx_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfsgnjx_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsgnjx_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { return 
__riscv_vfsgnjx_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsgnjx_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfsgnjx_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsgnjx_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfsgnjx_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsgnjx_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfsgnjx_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsgnjx_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsgnjx_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsgnjx_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsgnjx_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsgnjx_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16m4_tum(vm, 
vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsgnjx_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsgnjx_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsgnjx_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsgnjx_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsgnjx_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjx_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsgnjx_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsgnjx_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjx_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsgnjx_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsgnjx_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjx_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsgnjx_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { 
return __riscv_vfsgnjx_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsgnjx_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjx_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsgnjx_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsgnjx_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsgnjx_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsgnjx_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsgnjx_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsgnjx_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsgnjx_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsgnjx_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_tumu(vbool32_t vm, 
vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsgnjx_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsgnjx_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsgnjx_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsgnjx_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsgnjx_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsgnjx_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsgnjx_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsgnjx_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t 
vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsgnjx_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsgnjx_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsgnjx_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsgnjx_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsgnjx_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsgnjx_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsgnjx_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsgnjx_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsgnjx_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsgnjx_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsgnjx_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsgnjx_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t 
test_vfsgnjx_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsgnjx_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsgnjx_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsgnjx_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsgnjx_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsgnjx_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsgnjx_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsgnjx_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsgnjx_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsgnjx_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsgnjx_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsgnjx_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsgnjx_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsgnjx_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsgnjx_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsgnjx_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsgnjx_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsgnjx_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsgnjx_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsgnjx_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsgnjx_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { 
return __riscv_vfsgnjx_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsgnjx_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsgnjx_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsgnjx_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsgnjx_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsgnjx_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsgnjx_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsgnjx_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsgnjx_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsgnjx_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsgnjx_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsgnjx_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsgnjx_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsgnjx_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsgnjx_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsgnjx_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsgnjx_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsgnjx_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjx_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsgnjx_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsgnjx_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsgnjx_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsgnjx_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjx_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsgnjx_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsgnjx_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsgnjx_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsgnjx_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { 
return __riscv_vfsgnjx_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsgnjx_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsgnjx_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsgnjx_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsgnjx_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfsgnjx_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsgnjx_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsgnjx_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsgnjx_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsgnjx_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfsgnjx_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsgnjx_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsgnjx_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsgnjx_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsgnjx_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfsgnjx_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsgnjx_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsgnjx_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsgnjx_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsgnjx_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfsgnjx_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsgnjx_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsgnjx_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsgnjx_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsgnjx_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsgnjx_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfsgnjx_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfslide1down.c b/auto-generated/policy_funcs/llvm-api-tests/vfslide1down.c index efddd128a..e41f03352 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfslide1down.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfslide1down.c @@ -1,247 +1,355 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: 
-emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfslide1down_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfslide1down_vf_f16mf4_tu(vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfslide1down_vf_f16mf2_tu(vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfslide1down_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfslide1down_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfslide1down_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfslide1down_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfslide1down_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfslide1down_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfslide1down_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfslide1down_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1down_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfslide1down_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfslide1down_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfslide1down_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfslide1down_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfslide1down_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfslide1down_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfslide1down_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfslide1down_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfslide1down_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfslide1down_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t
test_vfslide1down_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfslide1down_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfslide1down_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfslide1down_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfslide1down_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfslide1down_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1down_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfslide1down_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfslide1down_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfslide1down_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfslide1down_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfslide1down_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfslide1down_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1down_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfslide1down_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfslide1down_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t 
test_vfslide1down_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfslide1down_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfslide1down_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfslide1down_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1down_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfslide1down_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1down_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfslide1down_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1down_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfslide1down_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1down_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1down_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfslide1down_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfslide1down_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfslide1down_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfslide1down_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfslide1down_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t 
test_vfslide1down_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfslide1down_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1down_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfslide1down_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfslide1down_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfslide1down_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfslide1down_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfslide1down_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1down_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfslide1down_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1down_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfslide1down_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1down_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1down_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfslide1down_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1down_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1down_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfslide1down_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1down_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfslide1down_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return 
__riscv_vfslide1down_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1down_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfslide1down_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1down_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfslide1down_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1down_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfslide1down_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1down_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfslide1down_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1down_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1down_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfslide1down_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1down_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfslide1down_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1down_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfslide1down_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1down_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfslide1down_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1down_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfslide1down_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1down_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1down_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfslide1down_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1down_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1down_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfslide1down_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1down_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1down_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t 
test_vfslide1down_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd,
+                                           vfloat64m4_t vs2, double rs1,
+                                           size_t vl) {
   return __riscv_vfslide1down_vf_f64m4_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfslide1down_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) {
+vfloat64m8_t test_vfslide1down_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd,
+                                           vfloat64m8_t vs2, double rs1,
+                                           size_t vl) {
   return __riscv_vfslide1down_vf_f64m8_mu(vm, vd, vs2, rs1, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfslide1up.c b/auto-generated/policy_funcs/llvm-api-tests/vfslide1up.c
index b4b45e6fd..d38f65b01 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vfslide1up.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vfslide1up.c
@@ -1,247 +1,352 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat16mf4_t test_vfslide1up_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) {
+vfloat16mf4_t test_vfslide1up_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2,
+                                           _Float16 rs1, size_t vl) {
   return __riscv_vfslide1up_vf_f16mf4_tu(vd, vs2, rs1, vl);
 }

-vfloat16mf2_t test_vfslide1up_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) {
+vfloat16mf2_t test_vfslide1up_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2,
+                                           _Float16 rs1, size_t vl) {
   return __riscv_vfslide1up_vf_f16mf2_tu(vd, vs2, rs1, vl);
 }

-vfloat16m1_t test_vfslide1up_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) {
+vfloat16m1_t test_vfslide1up_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2,
+                                         _Float16 rs1, size_t vl) {
   return __riscv_vfslide1up_vf_f16m1_tu(vd, vs2, rs1, vl);
 }

-vfloat16m2_t test_vfslide1up_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) {
+vfloat16m2_t test_vfslide1up_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2,
+                                         _Float16 rs1, size_t vl) {
   return __riscv_vfslide1up_vf_f16m2_tu(vd, vs2, rs1, vl);
 }

-vfloat16m4_t test_vfslide1up_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) {
+vfloat16m4_t test_vfslide1up_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2,
+                                         _Float16 rs1, size_t vl) {
   return __riscv_vfslide1up_vf_f16m4_tu(vd, vs2, rs1, vl);
 }

-vfloat16m8_t test_vfslide1up_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) {
+vfloat16m8_t test_vfslide1up_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2,
+                                         _Float16 rs1, size_t vl) {
   return __riscv_vfslide1up_vf_f16m8_tu(vd, vs2, rs1, vl);
 }

-vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) {
+vfloat32mf2_t test_vfslide1up_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+                                           float rs1, size_t vl) {
   return __riscv_vfslide1up_vf_f32mf2_tu(vd, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfslide1up_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) {
+vfloat32m1_t test_vfslide1up_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+                                         float rs1, size_t vl) {
   return __riscv_vfslide1up_vf_f32m1_tu(vd, vs2, rs1, vl);
 }

-vfloat32m2_t test_vfslide1up_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) {
+vfloat32m2_t test_vfslide1up_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+                                         float rs1, size_t vl) {
   return __riscv_vfslide1up_vf_f32m2_tu(vd, vs2, rs1, vl);
} -vfloat32m4_t test_vfslide1up_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfslide1up_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfslide1up_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1up_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfslide1up_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfslide1up_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1up_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfslide1up_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfslide1up_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1up_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfslide1up_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfslide1up_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1up_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfslide1up_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfslide1up_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1up_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfslide1up_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfslide1up_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1up_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfslide1up_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1up_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfslide1up_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1up_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfslide1up_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1up_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfslide1up_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1up_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfslide1up_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1up_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfslide1up_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1up_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) 
{ +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1up_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfslide1up_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1up_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfslide1up_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1up_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfslide1up_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1up_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfslide1up_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1up_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfslide1up_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1up_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1up_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfslide1up_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1up_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1up_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfslide1up_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1up_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1up_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfslide1up_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1up_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1up_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfslide1up_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1up_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfslide1up_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1up_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfslide1up_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t 
test_vfslide1up_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfslide1up_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1up_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfslide1up_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1up_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfslide1up_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfslide1up_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1up_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfslide1up_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1up_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfslide1up_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1up_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfslide1up_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1up_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfslide1up_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1up_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfslide1up_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1up_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfslide1up_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfslide1up_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1up_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfslide1up_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfslide1up_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfslide1up_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfslide1up_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfslide1up_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, 
double rs1, + size_t vl) { return __riscv_vfslide1up_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfslide1up_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfslide1up_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfslide1up_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfslide1up_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfslide1up_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfslide1up_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfslide1up_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfslide1up_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfslide1up_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfslide1up_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfslide1up_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfslide1up_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfslide1up_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfslide1up_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfslide1up_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfslide1up_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfslide1up_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfslide1up_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfslide1up_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfslide1up_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfslide1up_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfslide1up_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfslide1up_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfslide1up_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfslide1up_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t 
test_vfslide1up_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd,
+                                         vfloat64m1_t vs2, double rs1,
+                                         size_t vl) {
   return __riscv_vfslide1up_vf_f64m1_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m2_t test_vfslide1up_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) {
+vfloat64m2_t test_vfslide1up_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd,
+                                         vfloat64m2_t vs2, double rs1,
+                                         size_t vl) {
   return __riscv_vfslide1up_vf_f64m2_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m4_t test_vfslide1up_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) {
+vfloat64m4_t test_vfslide1up_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd,
+                                         vfloat64m4_t vs2, double rs1,
+                                         size_t vl) {
   return __riscv_vfslide1up_vf_f64m4_mu(vm, vd, vs2, rs1, vl);
 }

-vfloat64m8_t test_vfslide1up_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) {
+vfloat64m8_t test_vfslide1up_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd,
+                                         vfloat64m8_t vs2, double rs1,
+                                         size_t vl) {
   return __riscv_vfslide1up_vf_f64m8_mu(vm, vd, vs2, rs1, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfsqrt.c b/auto-generated/policy_funcs/llvm-api-tests/vfsqrt.c
index a9c28b989..acdc47d45 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vfsqrt.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vfsqrt.c
@@ -1,487 +1,607 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) {
+vfloat16mf4_t test_vfsqrt_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2,
+                                      size_t vl) {
   return __riscv_vfsqrt_v_f16mf4_tu(vd, vs2, vl);
 }

-vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) {
+vfloat16mf2_t test_vfsqrt_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2,
+                                      size_t vl) {
   return __riscv_vfsqrt_v_f16mf2_tu(vd, vs2, vl);
 }

-vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) {
+vfloat16m1_t test_vfsqrt_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2,
+                                    size_t vl) {
   return __riscv_vfsqrt_v_f16m1_tu(vd, vs2, vl);
 }

-vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) {
+vfloat16m2_t test_vfsqrt_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2,
+                                    size_t vl) {
   return __riscv_vfsqrt_v_f16m2_tu(vd, vs2, vl);
 }

-vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) {
+vfloat16m4_t test_vfsqrt_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2,
+                                    size_t vl) {
   return __riscv_vfsqrt_v_f16m4_tu(vd, vs2, vl);
 }

-vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) {
+vfloat16m8_t test_vfsqrt_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2,
+                                    size_t vl) {
   return __riscv_vfsqrt_v_f16m8_tu(vd, vs2, vl);
 }

-vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) {
+vfloat32mf2_t test_vfsqrt_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+                                      size_t vl) {
   return __riscv_vfsqrt_v_f32mf2_tu(vd, vs2, vl);
 }

-vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) {
+vfloat32m1_t test_vfsqrt_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+                                    size_t vl) {
   return __riscv_vfsqrt_v_f32m1_tu(vd, vs2, vl);
 }

-vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t vd,
vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfsqrt_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfsqrt_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfsqrt_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f32m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfsqrt_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfsqrt_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfsqrt_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfsqrt_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f64m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfsqrt_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfsqrt_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfsqrt_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfsqrt_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfsqrt_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfsqrt_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfsqrt_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfsqrt_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m1_tum(vm, vd, vs2, 
vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfsqrt_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfsqrt_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfsqrt_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfsqrt_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfsqrt_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfsqrt_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfsqrt_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfsqrt_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfsqrt_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfsqrt_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfsqrt_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfsqrt_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfsqrt_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfsqrt_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t 
vs2, size_t vl) { return __riscv_vfsqrt_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfsqrt_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfsqrt_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfsqrt_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfsqrt_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfsqrt_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfsqrt_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfsqrt_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfsqrt_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfsqrt_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfsqrt_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfsqrt_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfsqrt_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfsqrt_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m4_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t 
test_vfsqrt_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfsqrt_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfsqrt_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfsqrt_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfsqrt_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfsqrt_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfsqrt_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfsqrt_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfsqrt_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfsqrt_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m8_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfsqrt_v_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfsqrt_v_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t 
test_vfsqrt_v_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfsqrt_v_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f16m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfsqrt_v_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfsqrt_v_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfsqrt_v_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfsqrt_v_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfsqrt_v_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfsqrt_v_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfsqrt_v_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfsqrt_v_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + size_t vl) { return __riscv_vfsqrt_v_f64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfsqrt_v_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t 
vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfsqrt_v_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfsqrt_v_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfsqrt_v_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfsqrt_v_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfsqrt_v_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfsqrt_v_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfsqrt_v_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfsqrt_v_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfsqrt_v_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfsqrt_v_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfsqrt_v_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return 
__riscv_vfsqrt_v_f16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfsqrt_v_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfsqrt_v_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfsqrt_v_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfsqrt_v_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfsqrt_v_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfsqrt_v_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfsqrt_v_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfsqrt_v_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfsqrt_v_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfsqrt_v_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_rm_tumu(vbool16_t vm, 
vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfsqrt_v_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfsqrt_v_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat16mf4_t test_vfsqrt_v_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat16mf2_t test_vfsqrt_v_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsqrt_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat16m1_t test_vfsqrt_v_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsqrt_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat16m2_t test_vfsqrt_v_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsqrt_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat16m4_t test_vfsqrt_v_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsqrt_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vl) { +vfloat16m8_t test_vfsqrt_v_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f16m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat32mf2_t test_vfsqrt_v_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsqrt_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat32m1_t test_vfsqrt_v_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsqrt_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat32m2_t test_vfsqrt_v_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsqrt_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat32m4_t test_vfsqrt_v_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsqrt_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vl) { +vfloat32m8_t test_vfsqrt_v_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vl) { return 
__riscv_vfsqrt_v_f32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsqrt_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vl) { +vfloat64m1_t test_vfsqrt_v_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsqrt_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vl) { +vfloat64m2_t test_vfsqrt_v_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsqrt_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vl) { +vfloat64m4_t test_vfsqrt_v_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsqrt_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vl) { +vfloat64m8_t test_vfsqrt_v_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vl) { return __riscv_vfsqrt_v_f64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfsub.c b/auto-generated/policy_funcs/llvm-api-tests/vfsub.c index c5434f133..b39f6f678 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfsub.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfsub.c @@ -1,967 +1,1354 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsub_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfsub_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsub_vf_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsub_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfsub_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsub_vf_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsub_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfsub_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsub_vf_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsub_vv_f16m2_tu(vfloat16m2_t
vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfsub_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsub_vf_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsub_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfsub_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsub_vf_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsub_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vfsub_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsub_vf_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfsub_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsub_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfsub_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfsub_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsub_vf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfsub_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfsub_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsub_vf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfsub_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfsub_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsub_vf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfsub_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t 
vl) { +vfloat32m8_t test_vfsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfsub_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsub_vf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfsub_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfsub_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsub_vf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfsub_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { return __riscv_vfsub_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsub_vf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfsub_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { return __riscv_vfsub_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsub_vf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfsub_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfsub_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsub_vf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfsub_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsub_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsub_vf_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsub_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsub_vf_f16mf2_tum(vbool32_t vm, 
vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsub_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsub_vf_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsub_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsub_vf_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsub_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsub_vf_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsub_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsub_vf_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsub_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t 
vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfsub_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfsub_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return 
__riscv_vfsub_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfsub_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsub_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsub_vf_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsub_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsub_vf_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsub_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsub_vf_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsub_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsub_vf_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsub_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsub_vf_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, 
_Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsub_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsub_vf_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsub_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, 
float rs1, size_t vl) { return __riscv_vfsub_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfsub_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfsub_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfsub_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfsub_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsub_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsub_vf_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsub_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsub_vf_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t 
vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsub_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsub_vf_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsub_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsub_vf_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsub_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsub_vf_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsub_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsub_vf_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return 
__riscv_vfsub_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vfsub_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vfsub_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vfsub_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_mu(vbool8_t vm, 
vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, size_t vl) { return __riscv_vfsub_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfsub_vv_f16mf4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16mf4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfsub_vv_f16mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsub_vv_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfsub_vv_f16m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsub_vf_f16m1_rm_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsub_vv_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfsub_vv_f16m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsub_vf_f16m2_rm_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsub_vv_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfsub_vv_f16m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsub_vf_f16m4_rm_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t 
test_vfsub_vv_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vfsub_vv_f16m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsub_vf_f16m8_rm_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfsub_vf_f16m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfsub_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfsub_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsub_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfsub_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsub_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfsub_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsub_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfsub_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsub_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfsub_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsub_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfsub_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsub_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfsub_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vfsub_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsub_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vfsub_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsub_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfsub_vv_f64m1_rm_tu(vd, vs2, vs1, 
__RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsub_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + double rs1, size_t vl) { return __riscv_vfsub_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsub_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, size_t vl) { return __riscv_vfsub_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsub_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + double rs1, size_t vl) { return __riscv_vfsub_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsub_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, size_t vl) { return __riscv_vfsub_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsub_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + double rs1, size_t vl) { return __riscv_vfsub_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vfsub_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsub_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vfsub_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16mf4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16mf4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsub_vv_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + 
size_t vl) { return __riscv_vfsub_vv_f16m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsub_vf_f16m1_rm_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsub_vv_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsub_vf_f16m2_rm_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsub_vv_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsub_vf_f16m4_rm_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsub_vv_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsub_vf_f16m8_rm_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsub_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return 
__riscv_vfsub_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfsub_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfsub_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfsub_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfsub_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfsub_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m4_rm_tum(vm, vd, vs2, vs1, 
__RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfsub_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfsub_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsub_vv_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16mf4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsub_vf_f16mf4_rm_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16mf4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsub_vv_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsub_vf_f16mf2_rm_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsub_vv_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsub_vf_f16m1_rm_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsub_vv_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsub_vf_f16m2_rm_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return 
__riscv_vfsub_vf_f16m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsub_vv_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsub_vf_f16m4_rm_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsub_vv_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsub_vf_f16m8_rm_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsub_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfsub_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfsub_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { 
return __riscv_vfsub_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfsub_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, + size_t vl) { return __riscv_vfsub_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfsub_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfsub_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfsub_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return 
__riscv_vfsub_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vfsub_vv_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16mf4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf4_t test_vfsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf4_t test_vfsub_vf_f16mf4_rm_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16mf4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vfsub_vv_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16mf2_t test_vfsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat16mf2_t test_vfsub_vf_f16mf2_rm_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vfsub_vv_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m1_t test_vfsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat16m1_t test_vfsub_vf_f16m1_rm_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vfsub_vv_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m2_t test_vfsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat16m2_t test_vfsub_vf_f16m2_rm_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vfsub_vv_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f16m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m4_t test_vfsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat16m4_t test_vfsub_vf_f16m4_rm_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vfsub_vv_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vfloat16m8_t vs1, + size_t vl) { return 
__riscv_vfsub_vv_f16m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat16m8_t test_vfsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vfloat16m8_t test_vfsub_vf_f16m8_rm_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfsub_vf_f16m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vfsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat32mf2_t test_vfsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfsub_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat32m1_t test_vfsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vfsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat32m2_t test_vfsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vfsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat32m4_t test_vfsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vfsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat32m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vfloat32m8_t test_vfsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, float rs1, size_t vl) { return __riscv_vfsub_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t 
test_vfsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vfloat64m1_t test_vfsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, double rs1, + size_t vl) { return __riscv_vfsub_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vfsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vfloat64m2_t test_vfsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, double rs1, + size_t vl) { return __riscv_vfsub_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vfsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vfloat64m4_t test_vfsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, double rs1, + size_t vl) { return __riscv_vfsub_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vfsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat64m8_t vs1, + size_t vl) { return __riscv_vfsub_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vfloat64m8_t test_vfsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, double rs1, + size_t vl) { return __riscv_vfsub_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfwadd.c b/auto-generated/policy_funcs/llvm-api-tests/vfwadd.c index 31be8619f..2a42ff056 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfwadd.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfwadd.c @@ -1,1159 +1,1644 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2_t test_vfwadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfwadd_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t
test_vfwadd_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfwadd_wv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_wf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfwadd_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_vf_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_wv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfwadd_wv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_wf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_wf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfwadd_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_vf_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_wv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfwadd_wv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_wf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_wf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfwadd_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_vf_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_wv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfwadd_wv_f32m4_tu(vd, 
vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_wf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_wf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfwadd_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_vf_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_wv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfwadd_wv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_wf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_wf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfwadd_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_vf_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_wv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfwadd_wv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_wf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwadd_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_vf_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_wv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwadd_wv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_wf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_vv_f64m4_tu(vfloat64m4_t vd, 
vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfwadd_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_vf_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_wv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfwadd_wv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_wf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfwadd_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_vf_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_wv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfwadd_wv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_wf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t 
vl) { +vfloat32m1_t test_vfwadd_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_wv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_wf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_wv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_wf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_wv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_wf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, 
_Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_wv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_wf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_wv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_wf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_wv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_wf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float 
rs1, size_t vl) { +vfloat64m4_t test_vfwadd_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_wv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_wf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_wv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_wf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_tumu(vbool32_t vm, 
vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_wv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_wf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_wv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_wf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_wv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_wf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } 
-vfloat32m8_t test_vfwadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_wv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_wf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_wv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_wf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_wv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_wf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return 
__riscv_vfwadd_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_wv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_wf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_wv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_wf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, 
vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_wv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_wf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_wv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_wf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_wv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_wf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + 
size_t vl) { return __riscv_vfwadd_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_wv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_wf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_wv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_wf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_wv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_wf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return 
__riscv_vfwadd_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_wv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_wf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_wv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_wf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfwadd_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfwadd_wv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_wf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfwadd_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); 
} -vfloat32m1_t test_vfwadd_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_wv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfwadd_wv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_wf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_wf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfwadd_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_wv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfwadd_wv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_wf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_wf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfwadd_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_wv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfwadd_wv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_wf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_wf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfwadd_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { 
+vfloat32m8_t test_vfwadd_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_wv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfwadd_wv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_wf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwadd_wf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfwadd_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_wv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfwadd_wv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_wf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwadd_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_wv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwadd_wv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_wf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfwadd_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return 
__riscv_vfwadd_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_wv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfwadd_wv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_wf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfwadd_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_wv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfwadd_wv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_wf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + float rs1, size_t vl) { return __riscv_vfwadd_wf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t 
vl) { +vfloat32m1_t test_vfwadd_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_wv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_wf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_wv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_wf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_wv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_wf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, 
size_t vl) { +vfloat32m8_t test_vfwadd_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_wv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_wf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_wv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_wf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_wf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_wv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) 
{ +vfloat64m2_t test_vfwadd_wf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_wf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_wv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_wf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_wf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_wv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_wf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_wf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { - return __riscv_vfwadd_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { - return __riscv_vfwadd_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { + return 
__riscv_vfwadd_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { - return __riscv_vfwadd_wv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { - return __riscv_vfwadd_wf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, _Float16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_wv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_wf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_wv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_wf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } 
-vfloat32m4_t test_vfwadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_wv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_wf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_wv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_wf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_wv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m1_rm_tumu(vm, vd, 
vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_wf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_wf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_wv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_wf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_wf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_wv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_wf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_wf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m8_rm_tumu(vm, vd, 
vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_wv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_wf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_wf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwadd_wf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwadd_wv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwadd_wf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwadd_wf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return 
__riscv_vfwadd_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwadd_wv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwadd_wf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwadd_wf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwadd_wv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwadd_wf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwadd_wf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwadd_wv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwadd_wf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwadd_wf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwadd_wf_f32m8_rm_mu(vm, vd, vs2, 
rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwadd_wv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwadd_wf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwadd_wf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_wf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwadd_wv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwadd_wf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwadd_wf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_wf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwadd_wv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwadd_wv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } 
-vfloat64m4_t test_vfwadd_wf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwadd_wf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_wf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwadd_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwadd_wv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwadd_wv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwadd_wf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwadd_wf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, float rs1, + size_t vl) { return __riscv_vfwadd_wf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfwcvt.c b/auto-generated/policy_funcs/llvm-api-tests/vfwcvt.c index 1f40b1ef0..556843fd4 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfwcvt.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfwcvt.c @@ -1,1207 +1,1507 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tu(vfloat16mf4_t vd, vint8mf8_t vs2, size_t vl) { +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tu(vfloat16mf4_t vd, vint8mf8_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tu(vfloat16mf2_t vd, vint8mf4_t vs2, size_t vl) { +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tu(vfloat16mf2_t vd, vint8mf4_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tu(vfloat16m1_t vd, vint8mf2_t vs2, size_t vl) { +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tu(vfloat16m1_t vd, vint8mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tu(vfloat16m2_t vd, vint8m1_t vs2, size_t vl) { +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tu(vfloat16m2_t vd, vint8m1_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tu(vfloat16m4_t vd, vint8m2_t vs2, size_t vl) { +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tu(vfloat16m4_t vd, vint8m2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tu(vfloat16m8_t vd, vint8m4_t vs2, size_t vl) { +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tu(vfloat16m8_t vd, vint8m4_t vs2, + size_t vl) { return
__riscv_vfwcvt_f_x_v_f16m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t vd, vuint8mf8_t vs2, size_t vl) { +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t vd, vuint8mf8_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f16mf4_tu(vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t vd, vuint8mf4_t vs2, size_t vl) { +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t vd, vuint8mf4_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f16mf2_tu(vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tu(vfloat16m1_t vd, vuint8mf2_t vs2, size_t vl) { +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tu(vfloat16m1_t vd, vuint8mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m1_tu(vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tu(vfloat16m2_t vd, vuint8m1_t vs2, size_t vl) { +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tu(vfloat16m2_t vd, vuint8m1_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m2_tu(vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tu(vfloat16m4_t vd, vuint8m2_t vs2, size_t vl) { +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tu(vfloat16m4_t vd, vuint8m2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m4_tu(vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tu(vfloat16m8_t vd, vuint8m4_t vs2, size_t vl) { +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tu(vfloat16m8_t vd, vuint8m4_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m8_tu(vd, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_tu(vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vint32m1_t test_vfwcvt_x_f_v_i32m1_tu(vint32m1_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i32m1_tu(vd, vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_tu(vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vint32m2_t test_vfwcvt_x_f_v_i32m2_tu(vint32m2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i32m2_tu(vd, vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_tu(vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vint32m4_t test_vfwcvt_x_f_v_i32m4_tu(vint32m4_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i32m4_tu(vd, vs2, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_tu(vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vint32m8_t test_vfwcvt_x_f_v_i32m8_tu(vint32m8_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i32m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t 
test_vfwcvt_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tu(vfloat32mf2_t vd, vint16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tu(vfloat32mf2_t vd, vint16mf4_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tu(vfloat32m1_t vd, vint16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tu(vfloat32m1_t vd, vint16mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tu(vfloat32m2_t vd, vint16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tu(vfloat32m2_t vd, vint16m1_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tu(vfloat32m4_t vd, vint16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tu(vfloat32m4_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tu(vfloat32m8_t vd, vint16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tu(vfloat32m8_t vd, vint16m4_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f32m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t vd, vuint16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t vd, vuint16mf4_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tu(vfloat32m1_t vd, vuint16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tu(vfloat32m1_t vd, vuint16mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tu(vfloat32m2_t vd, vuint16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tu(vfloat32m2_t vd, vuint16m1_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tu(vfloat32m4_t vd, vuint16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tu(vfloat32m4_t vd, vuint16m2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tu(vfloat32m8_t vd, vuint16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tu(vfloat32m8_t vd, vuint16m4_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m8_tu(vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwcvt_f_f_v_f32mf2_tu(vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_f_v_f32m1_tu(vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwcvt_f_f_v_f32m2_tu(vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_f_v_f32m4_tu(vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tu(vfloat32m8_t 
vd, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwcvt_f_f_v_f32m8_tu(vd, vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i64m1_tu(vd, vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_tu(vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vint64m2_t test_vfwcvt_x_f_v_i64m2_tu(vint64m2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i64m2_tu(vd, vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_tu(vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vint64m4_t test_vfwcvt_x_f_v_i64m4_tu(vint64m4_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i64m4_tu(vd, vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_tu(vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vint64m8_t test_vfwcvt_x_f_v_i64m8_tu(vint64m8_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i64m8_tu(vd, vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t vd, vint32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t vd, vint32mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tu(vfloat64m2_t vd, vint32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tu(vfloat64m2_t vd, vint32m1_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tu(vfloat64m4_t vd, vint32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tu(vfloat64m4_t vd, vint32m2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tu(vfloat64m8_t vd, vint32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tu(vfloat64m8_t vd, vint32m4_t vs2, + size_t vl) { return __riscv_vfwcvt_f_x_v_f64m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tu(vfloat64m2_t vd, vuint32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tu(vfloat64m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tu(vfloat64m4_t vd, vuint32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tu(vfloat64m4_t vd, 
vuint32m2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tu(vfloat64m8_t vd, vuint32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tu(vfloat64m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m8_tu(vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_f_v_f64m1_tu(vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwcvt_f_f_v_f64m2_tu(vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwcvt_f_f_v_f64m4_tu(vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwcvt_f_f_v_f64m8_tu(vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vint8mf8_t vs2, size_t vl) { +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vint8mf4_t vs2, size_t vl) { +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vint8mf2_t vs2, size_t vl) { +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vint8m1_t vs2, size_t vl) { +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vint8m2_t vs2, size_t vl) { +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vint8m4_t vs2, size_t vl) { +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vuint8mf8_t vs2, size_t vl) { +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16mf4_tum(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vuint8mf4_t vs2, size_t vl) { +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16mf2_tum(vm, vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vuint8mf2_t vs2, size_t vl) { +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tum(vbool16_t vm, 
vfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m1_tum(vm, vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vuint8m1_t vs2, size_t vl) { +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m2_tum(vm, vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vuint8m2_t vs2, size_t vl) { +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m4_tum(vm, vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vuint8m4_t vs2, size_t vl) { +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m8_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vint32m1_t test_vfwcvt_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vint32m2_t test_vfwcvt_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vint32m4_t test_vfwcvt_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m4_tum(vm, vd, vs2, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vint32m8_t test_vfwcvt_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m8_tum(vm, vd, vs2, vl); } 
-vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vint16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vint16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vint16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vint16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vint16m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vint16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vint16m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vuint16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vuint16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vuint16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vuint16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vuint16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m8_tum(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32mf2_tum(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32m1_tum(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32m2_tum(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tum(vbool8_t vm, 
vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32m4_tum(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32m8_tum(vm, vd, vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vint64m1_t test_vfwcvt_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m1_tum(vm, vd, vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vint64m2_t test_vfwcvt_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m2_tum(vm, vd, vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vint64m4_t test_vfwcvt_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m4_tum(vm, vd, vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vint64m8_t test_vfwcvt_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vint32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vint32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vint32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vint32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + 
vint32m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vuint32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vuint32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vuint32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f64m1_tum(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f64m2_tum(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f64m4_tum(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f64m8_tum(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vint8mf8_t vs2, size_t vl) { +vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vint8mf4_t vs2, size_t vl) { +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vint8mf2_t vs2, size_t vl) { +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vint8m1_t vs2, size_t vl) { +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vint8m2_t vs2, size_t vl) { +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { return 
__riscv_vfwcvt_f_x_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vint8m4_t vs2, size_t vl) { +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vuint8mf8_t vs2, size_t vl) { +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16mf4_tumu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vuint8mf4_t vs2, size_t vl) { +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16mf2_tumu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vuint8mf2_t vs2, size_t vl) { +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m1_tumu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vuint8m1_t vs2, size_t vl) { +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m2_tumu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vuint8m2_t vs2, size_t vl) { +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m4_tumu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vuint8m4_t vs2, size_t vl) { +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m8_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vint32m1_t test_vfwcvt_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vint32m2_t test_vfwcvt_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vint32m4_t test_vfwcvt_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m4_tumu(vm, vd, vs2, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vint32m8_t test_vfwcvt_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32mf2_tumu(vm, vd, vs2, vl); } 
-vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vint16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vint16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vint16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vint16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vint16m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vint16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vint16m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vuint16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vuint16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vuint16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vuint16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t 
test_vfwcvt_f_xu_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vuint16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m8_tumu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32mf2_tumu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32m1_tumu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32m2_tumu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32m4_tumu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32m8_tumu(vm, vd, vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m1_tumu(vm, vd, vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vint64m2_t test_vfwcvt_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m2_tumu(vm, vd, vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vint64m4_t test_vfwcvt_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m4_tumu(vm, vd, vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vint64m8_t test_vfwcvt_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, 
vfloat32m4_t vs2, size_t vl) { +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vint32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vint32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vint32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vint32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vuint32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vuint32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vuint32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f64m1_tumu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f64m2_tumu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f64m4_tumu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f64m8_tumu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vint8mf8_t vs2, size_t vl) { +vfloat16mf4_t 
test_vfwcvt_f_x_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vint8mf4_t vs2, size_t vl) { +vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vint8mf2_t vs2, size_t vl) { +vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vint8m1_t vs2, size_t vl) { +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vint8m2_t vs2, size_t vl) { +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16m4_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vint8m4_t vs2, size_t vl) { +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f16m8_mu(vm, vd, vs2, vl); } -vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vuint8mf8_t vs2, size_t vl) { +vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16mf4_mu(vm, vd, vs2, vl); } -vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vuint8mf4_t vs2, size_t vl) { +vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16mf2_mu(vm, vd, vs2, vl); } -vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vuint8mf2_t vs2, size_t vl) { +vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m1_mu(vm, vd, vs2, vl); } -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vuint8m1_t vs2, size_t vl) { +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m2_mu(vm, vd, vs2, vl); } -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vuint8m2_t vs2, size_t vl) { +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m4_mu(vm, vd, vs2, vl); } -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vuint8m4_t vs2, size_t vl) { +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f16m8_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vint32m1_t test_vfwcvt_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return 
__riscv_vfwcvt_x_f_v_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vint32m2_t test_vfwcvt_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m2_mu(vm, vd, vs2, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vint32m4_t test_vfwcvt_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m4_mu(vm, vd, vs2, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vint32m8_t test_vfwcvt_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vint16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vint16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vint16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vint16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vint16m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vint16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vint16m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f32m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vuint16mf4_t vs2, 
size_t vl) { +vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vuint16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vuint16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vuint16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vuint16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f32m8_mu(vm, vd, vs2, vl); } -vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32mf2_mu(vm, vd, vs2, vl); } -vfloat32m1_t test_vfwcvt_f_f_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwcvt_f_f_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32m1_mu(vm, vd, vs2, vl); } -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32m2_mu(vm, vd, vs2, vl); } -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32m4_mu(vm, vd, vs2, vl); } -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f32m8_mu(vm, vd, vs2, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vint64m1_t test_vfwcvt_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m1_mu(vm, vd, vs2, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vint64m2_t test_vfwcvt_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m2_mu(vm, vd, vs2, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vint64m4_t test_vfwcvt_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m4_mu(vm, vd, vs2, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vint64m8_t test_vfwcvt_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return 
__riscv_vfwcvt_x_f_v_i64m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vint32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vint32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vint32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vint32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_x_v_f64m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vuint32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vuint32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vuint32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vuint32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_xu_v_f64m8_mu(vm, vd, vs2, vl); } -vfloat64m1_t test_vfwcvt_f_f_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwcvt_f_f_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f64m1_mu(vm, vd, vs2, vl); } -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, 
vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f64m2_mu(vm, vd, vs2, vl); } -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f64m4_mu(vm, vd, vs2, vl); } -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_f_f_v_f64m8_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tu(vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tu(vint32m1_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tu(vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tu(vint32m2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tu(vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tu(vint32m4_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tu(vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tu(vint32m8_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u32mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tu(vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tu(vint64m1_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t 
test_vfwcvt_x_f_v_i64m2_rm_tu(vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tu(vint64m2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tu(vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tu(vint64m4_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tu(vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tu(vint64m8_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwcvt_x_f_v_i64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tum(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tum(vbool64_t vm, vint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tum(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tum(vbool32_t vm, vint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tum(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tum(vbool16_t vm, vint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tum(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tum(vbool8_t vm, vint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tum(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tum(vbool4_t vm, vint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tum(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tum(vbool64_t vm, vuint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t 
test_vfwcvt_xu_f_v_u32m1_rm_tum(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tum(vbool32_t vm, vuint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tum(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tum(vbool16_t vm, vuint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tum(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tum(vbool8_t vm, vuint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tum(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tum(vbool4_t vm, vuint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tum(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tum(vbool64_t vm, vint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tum(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tum(vbool32_t vm, vint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tum(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tum(vbool16_t vm, vint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tum(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tum(vbool8_t vm, vint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tum(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tum(vbool64_t vm, vuint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tum(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tum(vbool32_t vm, vuint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tum(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tum(vbool16_t vm, vuint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tum(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tum(vbool8_t vm, vuint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tumu(vbool64_t vm, vint32mf2_t vd, 
vfloat16mf4_t vs2, size_t vl) { +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tumu(vbool64_t vm, vint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tumu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tumu(vbool32_t vm, vint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tumu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tumu(vbool16_t vm, vint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tumu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tumu(vbool8_t vm, vint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tumu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tumu(vbool4_t vm, vint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t vm, vuint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tumu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tumu(vbool32_t vm, vuint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tumu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tumu(vbool16_t vm, vuint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tumu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tumu(vbool8_t vm, vuint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tumu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tumu(vbool4_t vm, vuint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tumu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tumu(vbool64_t vm, vint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tumu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tumu(vbool32_t vm, vint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tumu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { 
+vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tumu(vbool16_t vm, vint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tumu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tumu(vbool8_t vm, vint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tumu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tumu(vbool64_t vm, vuint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tumu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tumu(vbool32_t vm, vuint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tumu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tumu(vbool16_t vm, vuint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tumu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tumu(vbool8_t vm, vuint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m8_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_mu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_mu(vbool64_t vm, vint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_mu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_mu(vbool32_t vm, vint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_mu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_mu(vbool16_t vm, vint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_mu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_mu(vbool8_t vm, vint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_mu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_mu(vbool4_t vm, vint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_mu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_mu(vbool64_t vm, vuint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_mu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_mu(vbool32_t vm, vuint32m1_t 
vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_mu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_mu(vbool16_t vm, vuint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_mu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_mu(vbool8_t vm, vuint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_mu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_mu(vbool4_t vm, vuint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u32m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_mu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_mu(vbool64_t vm, vint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_mu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_mu(vbool32_t vm, vint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_mu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_mu(vbool16_t vm, vint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_mu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_mu(vbool8_t vm, vint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_x_f_v_i64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_mu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_mu(vbool64_t vm, vuint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_mu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_mu(vbool32_t vm, vuint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_mu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_mu(vbool16_t vm, vuint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_mu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_mu(vbool8_t vm, vuint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_xu_f_v_u64m8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfwcvt_rtz.c b/auto-generated/policy_funcs/llvm-api-tests/vfwcvt_rtz.c index b13afd7c7..7f821ec29 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfwcvt_rtz.c +++ 
b/auto-generated/policy_funcs/llvm-api-tests/vfwcvt_rtz.c @@ -1,295 +1,367 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tu(vint32mf2_t vd, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tu(vint32m1_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m1_tu(vd, vs2, vl); } -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tu(vint32m2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m2_tu(vd, vs2, vl); } -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tu(vint32m4_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m4_tu(vd, vs2, vl); } -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tu(vint32m8_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tu(vuint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tu(vuint32m1_t vd, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tu(vuint32m2_t vd, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tu(vuint32m4_t vd, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tu(vuint32m8_t vd, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tu(vd, vs2, vl); } -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tu(vint64m1_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m1_tu(vd, vs2, vl); } -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tu(vint64m2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m2_tu(vd, vs2, vl); } -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tu(vint64m4_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m4_tu(vd,
vs2, vl); } -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tu(vint64m8_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m8_tu(vd, vs2, vl); } -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tu(vuint64m1_t vd, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tu(vuint64m2_t vd, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tu(vuint64m4_t vd, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tu(vuint64m8_t vd, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tu(vd, vs2, vl); } -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m4_tum(vm, vd, vs2, vl); } -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { 
+vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tum(vm, vd, vs2, vl); } -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m1_tum(vm, vd, vs2, vl); } -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m2_tum(vm, vd, vs2, vl); } -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m4_tum(vm, vd, vs2, vl); } -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t vm, 
vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m4_tumu(vm, vd, vs2, vl); } -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m8_tumu(vm, vd, vs2, vl); } -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m1_tumu(vm, vd, vs2, vl); } -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m2_tumu(vm, vd, vs2, vl); } -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m4_tumu(vm, vd, vs2, vl); } -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m2_tumu(vm, 
vd, vs2, vl); } -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m8_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m2_mu(vm, vd, vs2, vl); } -vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m4_mu(vm, vd, vs2, vl); } -vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i32m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vfloat16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vfloat16mf2_t vs2, size_t vl) { +vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vfloat16m1_t vs2, size_t vl) { +vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vfloat16m2_t vs2, size_t vl) { +vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vfloat16m4_t vs2, size_t vl) { +vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u32m8_mu(vm, vd, vs2, vl); } -vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return 
__riscv_vfwcvt_rtz_x_f_v_i64m1_mu(vm, vd, vs2, vl); } -vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m2_mu(vm, vd, vs2, vl); } -vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m4_mu(vm, vd, vs2, vl); } -vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_x_f_v_i64m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vfloat32mf2_t vs2, size_t vl) { +vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vfloat32m1_t vs2, size_t vl) { +vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vfloat32m2_t vs2, size_t vl) { +vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vfloat32m4_t vs2, size_t vl) { +vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwcvt_rtz_xu_f_v_u64m8_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfwmacc.c b/auto-generated/policy_funcs/llvm-api-tests/vfwmacc.c index 4be411c6e..c32736de3 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfwmacc.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfwmacc.c @@ -1,583 +1,846 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2_t test_vfwmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t
test_vfwmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f32m8_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f32m8_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f64m8_tu(vd, 
vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m8_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return 
__riscv_vfwmacc_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t 
vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t 
vl) { +vfloat64m8_t test_vfwmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, 
vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tu(vfloat32mf2_t vd, _Float16 vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tu(vfloat32m1_t vd, _Float16 vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs1, 
vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tu(vfloat32m2_t vd, _Float16 vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tu(vfloat32m4_t vd, _Float16 vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tu(vfloat32m8_t vd, _Float16 vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tu(vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tu(vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tu(vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tu(vfloat64m8_t 
vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmacc_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmacc_vf_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } 
-vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + float vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); 
+vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t 
test_vfwmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmacc_vf_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + float vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { 
return __riscv_vfwmacc_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmacc_vf_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmacc_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + 
                                         size_t vl) {
   return __riscv_vfwmacc_vf_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+vfloat64m2_t test_vfwmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd,
+                                         vfloat32m1_t vs1, vfloat32m1_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmacc_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
+vfloat64m2_t test_vfwmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd,
+                                         float vs1, vfloat32m1_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmacc_vf_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+vfloat64m4_t test_vfwmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd,
+                                         vfloat32m2_t vs1, vfloat32m2_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmacc_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
+vfloat64m4_t test_vfwmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd,
+                                         float vs1, vfloat32m2_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmacc_vf_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+vfloat64m8_t test_vfwmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd,
+                                         vfloat32m4_t vs1, vfloat32m4_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmacc_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
+vfloat64m8_t test_vfwmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd,
+                                         float vs1, vfloat32m4_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmacc_vf_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfwmsac.c b/auto-generated/policy_funcs/llvm-api-tests/vfwmsac.c
index 40e33de36..ed0c3e4c4 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vfwmsac.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vfwmsac.c
@@ -1,583 +1,846 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat32mf2_t test_vfwmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
+vfloat32mf2_t test_vfwmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1,
+                                        vfloat16mf4_t vs2, size_t vl) {
   return __riscv_vfwmsac_vv_f32mf2_tu(vd, vs1, vs2, vl);
 }

-vfloat32mf2_t test_vfwmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
+vfloat32mf2_t test_vfwmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1,
+                                        vfloat16mf4_t vs2, size_t vl) {
   return __riscv_vfwmsac_vf_f32mf2_tu(vd, vs1, vs2, vl);
 }

-vfloat32m1_t test_vfwmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
+vfloat32m1_t test_vfwmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1,
+                                      vfloat16mf2_t vs2, size_t vl) {
   return __riscv_vfwmsac_vv_f32m1_tu(vd, vs1, vs2, vl);
 }

-vfloat32m1_t test_vfwmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1,
vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f32m8_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f32m8_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t 
vl) { return __riscv_vfwmsac_vv_f64m8_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m8_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t 
vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t 
test_vfwmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_tumu(vbool8_t vm, 
vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t 
test_vfwmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tu(vfloat32mf2_t vd, _Float16 vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tu(vfloat32m1_t vd, _Float16 vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t 
test_vfwmsac_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tu(vfloat32m2_t vd, _Float16 vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tu(vfloat32m4_t vd, _Float16 vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tu(vfloat32m8_t vd, _Float16 vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tu(vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tu(vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tu(vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, 
size_t vl) { +vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmsac_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwmsac_vf_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return 
__riscv_vfwmsac_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + float vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return 
__riscv_vfwmsac_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { + return 
__riscv_vfwmsac_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwmsac_vf_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + float vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t 
test_vfwmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwmsac_vf_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwmsac_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, 
size_t vl) {
+vfloat64m1_t test_vfwmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd,
+                                         float vs1, vfloat32mf2_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmsac_vf_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) {
+vfloat64m2_t test_vfwmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd,
+                                         vfloat32m1_t vs1, vfloat32m1_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmsac_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m2_t test_vfwmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) {
+vfloat64m2_t test_vfwmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd,
+                                         float vs1, vfloat32m1_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmsac_vf_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) {
+vfloat64m4_t test_vfwmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd,
+                                         vfloat32m2_t vs1, vfloat32m2_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmsac_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m4_t test_vfwmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) {
+vfloat64m4_t test_vfwmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd,
+                                         float vs1, vfloat32m2_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmsac_vf_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) {
+vfloat64m8_t test_vfwmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd,
+                                         vfloat32m4_t vs1, vfloat32m4_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmsac_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }

-vfloat64m8_t test_vfwmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) {
+vfloat64m8_t test_vfwmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd,
+                                         float vs1, vfloat32m4_t vs2,
+                                         size_t vl) {
   return __riscv_vfwmsac_vf_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfwmul.c b/auto-generated/policy_funcs/llvm-api-tests/vfwmul.c
index 54141a997..4be35ffdb 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vfwmul.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vfwmul.c
@@ -1,583 +1,826 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat32mf2_t test_vfwmul_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) {
+vfloat32mf2_t test_vfwmul_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2,
+                                       vfloat16mf4_t vs1, size_t vl) {
   return __riscv_vfwmul_vv_f32mf2_tu(vd, vs2, vs1, vl);
 }

-vfloat32mf2_t test_vfwmul_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) {
+vfloat32mf2_t test_vfwmul_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2,
+                                       _Float16 rs1, size_t vl) {
   return __riscv_vfwmul_vf_f32mf2_tu(vd, vs2, rs1, vl);
 }

-vfloat32m1_t test_vfwmul_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) {
+vfloat32m1_t test_vfwmul_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2,
+                                     vfloat16mf2_t vs1, size_t vl) {
   return
__riscv_vfwmul_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwmul_vf_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwmul_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwmul_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfwmul_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwmul_vf_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwmul_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwmul_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfwmul_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwmul_vf_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwmul_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwmul_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfwmul_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwmul_vf_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwmul_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwmul_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfwmul_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwmul_vf_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwmul_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwmul_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwmul_vf_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwmul_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfwmul_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwmul_vf_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t 
test_vfwmul_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfwmul_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwmul_vf_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwmul_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwmul_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwmul_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwmul_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwmul_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwmul_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwmul_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwmul_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwmul_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwmul_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t 
test_vfwmul_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwmul_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwmul_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwmul_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwmul_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwmul_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwmul_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwmul_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwmul_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwmul_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwmul_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwmul_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t 
vs1, size_t vl) { +vfloat32m2_t test_vfwmul_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwmul_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwmul_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwmul_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwmul_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwmul_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwmul_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwmul_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwmul_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwmul_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwmul_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwmul_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t 
vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwmul_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwmul_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwmul_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwmul_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwmul_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwmul_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwmul_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwmul_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwmul_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwmul_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwmul_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwmul_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_mu(vbool64_t vm, 
vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwmul_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwmul_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwmul_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwmul_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwmul_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwmul_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwmul_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwmul_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfwmul_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwmul_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwmul_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfwmul_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwmul_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwmul_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t 
vl) { +vfloat32m2_t test_vfwmul_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfwmul_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwmul_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwmul_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwmul_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfwmul_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwmul_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwmul_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwmul_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfwmul_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwmul_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwmul_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwmul_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfwmul_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwmul_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwmul_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwmul_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwmul_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwmul_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfwmul_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwmul_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwmul_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return 
__riscv_vfwmul_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwmul_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfwmul_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwmul_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwmul_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwmul_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwmul_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwmul_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwmul_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwmul_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwmul_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return 
__riscv_vfwmul_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwmul_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwmul_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwmul_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwmul_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwmul_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwmul_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwmul_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwmul_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { - return __riscv_vfwmul_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { - return __riscv_vfwmul_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_rm_tumu(vbool32_t vm, 
vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwmul_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwmul_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwmul_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwmul_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwmul_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwmul_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwmul_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwmul_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwmul_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwmul_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwmul_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } 
-vfloat64m2_t test_vfwmul_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwmul_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwmul_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwmul_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwmul_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwmul_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwmul_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwmul_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwmul_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwmul_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwmul_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwmul_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwmul_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwmul_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, 
vl); } -vfloat32m4_t test_vfwmul_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwmul_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwmul_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwmul_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwmul_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwmul_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwmul_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwmul_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwmul_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwmul_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwmul_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwmul_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwmul_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwmul_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwmul_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwmul_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwmul_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwmul_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwmul_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwmul_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t 
test_vfwmul_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwmul_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwmul_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfwnmacc.c b/auto-generated/policy_funcs/llvm-api-tests/vfwnmacc.c index 8d13aa0ee..0612089f9 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfwnmacc.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfwnmacc.c @@ -1,583 +1,868 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f32m8_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, +
vfloat16m4_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f32m8_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmacc_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmacc_vf_f64m1_tu(vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmacc_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmacc_vf_f64m2_tu(vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmacc_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmacc_vf_f64m4_tu(vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmacc_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f64m8_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmacc_vf_f64m8_tu(vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m8_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t 
vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmacc_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmacc_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmacc_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmacc_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmacc_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmacc_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t 
test_vfwnmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmacc_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmacc_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t 
vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmacc_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmacc_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmacc_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmacc_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmacc_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmacc_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmacc_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmacc_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + float vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, 
_Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmacc_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmacc_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmacc_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmacc_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmacc_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t 
test_vfwnmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmacc_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmacc_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmacc_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tu(vfloat32mf2_t vd, _Float16 vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tu(vfloat32m1_t vd, _Float16 vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tu(vfloat32m2_t vd, _Float16 vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tu(vfloat32m4_t vd, _Float16 vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t 
test_vfwnmacc_vf_f32m8_rm_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tu(vfloat32m8_t vd, _Float16 vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tu(vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tu(vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tu(vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwnmacc_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tu(vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwnmacc_vf_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, 
vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, 
+ size_t vl) { + return __riscv_vfwnmacc_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + float vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return 
__riscv_vfwnmacc_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + 
size_t vl) { + return __riscv_vfwnmacc_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + float vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, 
size_t vl) { - return __riscv_vfwnmacc_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmacc_vf_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmacc_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmacc_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmacc_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmacc_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmacc_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmacc_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + 
vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmacc_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmacc_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmacc_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmacc_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmacc_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmacc_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmacc_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + float vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwnmacc_vf_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfwnmsac.c b/auto-generated/policy_funcs/llvm-api-tests/vfwnmsac.c index 0f9865acb..fcefa8655 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfwnmsac.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfwnmsac.c @@ -1,583 +1,868 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tu(vfloat32mf2_t vd, _Float16 vs1, + vfloat16mf4_t vs2, size_t vl) { return
__riscv_vfwnmsac_vf_f32mf2_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vf_f32m1_tu(vfloat32m1_t vd, _Float16 vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f32m1_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vf_f32m2_tu(vfloat32m2_t vd, _Float16 vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f32m2_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vf_f32m4_tu(vfloat32m4_t vd, _Float16 vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f32m4_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f32m8_tu(vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vf_f32m8_tu(vfloat32m8_t vd, _Float16 vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f32m8_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmsac_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmsac_vf_f64m1_tu(vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m1_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmsac_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmsac_vf_f64m2_tu(vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m2_tu(vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmsac_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m4_t 
test_vfwnmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmsac_vf_f64m4_tu(vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m4_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmsac_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f64m8_tu(vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmsac_vf_f64m8_tu(vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m8_tu(vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32mf2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m1_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m2_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m4_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32m8_t 
test_vfwnmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmsac_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmsac_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f64m1_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmsac_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmsac_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f64m2_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmsac_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmsac_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f64m4_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmsac_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmsac_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m8_tum(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32mf2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, 
vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmsac_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmsac_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f64m1_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmsac_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmsac_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f64m2_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, 
vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmsac_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmsac_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f64m4_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmsac_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmsac_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + float vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f64m8_tumu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32mf2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m1_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m2_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m4_mu(vm, vd, vs1, vs2, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m4_mu(vm, vd, vs1, vs2, vl); } 
-vfloat32m8_t test_vfwnmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmsac_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmsac_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m1_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmsac_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmsac_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m2_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmsac_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmsac_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m4_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmsac_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmsac_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m8_mu(vm, vd, vs1, vs2, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tu(vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tu(vfloat32mf2_t vd, _Float16 vs1, + vfloat16mf4_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f32mf2_rm_tu(vd, vs1, 
vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tu(vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tu(vfloat32m1_t vd, _Float16 vs1, + vfloat16mf2_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tu(vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tu(vfloat32m2_t vd, _Float16 vs1, + vfloat16m1_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tu(vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tu(vfloat32m4_t vd, _Float16 vs1, + vfloat16m2_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tu(vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tu(vfloat32m8_t vd, _Float16 vs1, + vfloat16m4_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tu(vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tu(vfloat64m1_t vd, float vs1, + vfloat32mf2_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tu(vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tu(vfloat64m2_t vd, float vs1, + vfloat32m1_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } 
-vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tu(vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tu(vfloat64m4_t vd, float vs1, + vfloat32m2_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwnmsac_vv_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tu(vfloat64m8_t vd, float vs1, + vfloat32m4_t vs2, size_t vl) { return __riscv_vfwnmsac_vf_f64m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f32mf2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f32m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + 
_Float16 vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f32m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f32m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f32m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f64m1_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f64m2_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t 
vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f64m4_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + float vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f64m8_rm_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f32mf2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, + vfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f32m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, 
vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f32m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f32m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f32m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f64m1_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, float 
vs1, vfloat32m1_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f64m2_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f64m4_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + float vs1, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f64m8_rm_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs1, + vfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) { - return __riscv_vfwnmsac_vf_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwnmsac_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + _Float16 vs1, vfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_f32mf2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) { +vfloat32m1_t test_vfwnmsac_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + _Float16 vs1, vfloat16mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_mu(vbool16_t vm, 
vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) { +vfloat32m2_t test_vfwnmsac_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + _Float16 vs1, vfloat16m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) { +vfloat32m4_t test_vfwnmsac_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + _Float16 vs1, vfloat16m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) { +vfloat32m8_t test_vfwnmsac_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + _Float16 vs1, vfloat16m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f32m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmsac_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, float vs1, vfloat32mf2_t vs2, size_t vl) { +vfloat64m1_t test_vfwnmsac_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + float vs1, vfloat32mf2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f64m1_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmsac_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, float vs1, vfloat32m1_t vs2, size_t vl) { +vfloat64m2_t test_vfwnmsac_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + float vs1, vfloat32m1_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f64m2_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmsac_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m4_t 
test_vfwnmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, float vs1, vfloat32m2_t vs2, size_t vl) { +vfloat64m4_t test_vfwnmsac_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + float vs1, vfloat32m2_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f64m4_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmsac_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vv_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, float vs1, vfloat32m4_t vs2, size_t vl) { +vfloat64m8_t test_vfwnmsac_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + float vs1, vfloat32m4_t vs2, + size_t vl) { return __riscv_vfwnmsac_vf_f64m8_rm_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfwredosum.c b/auto-generated/policy_funcs/llvm-api-tests/vfwredosum.c index 9b8389dbf..269edd92e 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfwredosum.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfwredosum.c @@ -1,183 +1,313 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tu(vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tu(vfloat32m1_t vd, + vfloat16mf4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f16mf4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tu(vfloat32m1_t vd, + vfloat16mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f16mf2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tu(vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tu(vfloat32m1_t vd, + vfloat16m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f16m1_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tu(vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tu(vfloat32m1_t vd, + vfloat16m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f16m2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tu(vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tu(vfloat32m1_t vd, + vfloat16m4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f16m4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tu(vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tu(vfloat32m1_t vd, + vfloat16m8_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f16m8_f32m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t
test_vfwredosum_vs_f32mf2_f64m1_tu(vfloat64m1_t vd, + vfloat32mf2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f32mf2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tu(vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tu(vfloat64m1_t vd, + vfloat32m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f32m1_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tu(vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tu(vfloat64m1_t vd, + vfloat32m2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f32m2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tu(vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tu(vfloat64m1_t vd, + vfloat32m4_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f32m4_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tu(vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tu(vfloat64m1_t vd, + vfloat32m8_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f32m8_f64m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, + vfloat16mf4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f16mf4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, + vfloat16m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f16m1_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, + vfloat16m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f16m2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, + vfloat16m4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f16m4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tum(vbool2_t vm, vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_tum(vbool2_t vm, vfloat32m1_t vd, + vfloat16m8_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f16m8_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_tum(vbool64_t vm, 
vfloat64m1_t vd, + vfloat32mf2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f32mf2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, + vfloat32m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f32m1_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, + vfloat32m2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f32m2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, + vfloat32m4_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f32m4_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tum(vbool4_t vm, vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_tum(vbool4_t vm, vfloat64m1_t vd, + vfloat32m8_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredosum_vs_f32m8_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f16mf4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t vd, + vfloat16mf4_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f16mf4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat16mf2_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f16m1_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t vd, + vfloat16m1_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m1_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f16m2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat16m2_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f16m4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t vd, + vfloat16m4_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } 
-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f16m8_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t vd, + vfloat16m8_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f16m8_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f32mf2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t vd, + vfloat32mf2_t vs2, + vfloat64m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f32mf2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f32m1_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t vd, + vfloat32m1_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m1_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f32m2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t vd, + vfloat32m2_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f32m4_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t vd, + vfloat32m4_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m4_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f32m8_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t vd, + vfloat32m8_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredosum_vs_f32m8_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f16mf4_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_rm_tum(vbool64_t vm, + vfloat32m1_t vd, + vfloat16mf4_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f16mf4_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, + vfloat16mf2_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { - return 
__riscv_vfwredosum_vs_f16m1_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_rm_tum(vbool16_t vm, + vfloat32m1_t vd, + vfloat16m1_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f16m1_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f16m2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, + vfloat16m2_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f16m2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f16m4_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, + vfloat16m4_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f16m4_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_tum(vbool2_t vm, vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f16m8_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_rm_tum(vbool2_t vm, vfloat32m1_t vd, + vfloat16m8_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f16m8_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_tum(vbool64_t vm, + vfloat64m1_t vd, + vfloat32mf2_t vs2, + vfloat64m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f32mf2_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_tum(vbool32_t vm, + vfloat64m1_t vd, + vfloat32m1_t vs2, + vfloat64m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f32m1_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_tum(vbool16_t vm, + vfloat64m1_t vd, + vfloat32m2_t vs2, + vfloat64m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f32m2_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f32mf2_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); -} - -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f32m1_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); -} - -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f32m2_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); -} - -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f32m4_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); -} - -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_tum(vbool4_t vm, vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredosum_vs_f32m8_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t 
test_vfwredosum_vs_f32m4_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, + vfloat32m4_t vs2, + vfloat64m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f32m4_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_rm_tum(vbool4_t vm, vfloat64m1_t vd, + vfloat32m8_t vs2, + vfloat64m1_t vs1, + size_t vl) { + return __riscv_vfwredosum_vs_f32m8_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfwredusum.c b/auto-generated/policy_funcs/llvm-api-tests/vfwredusum.c index 442506448..eb1d79b82 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfwredusum.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfwredusum.c @@ -1,183 +1,313 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tu(vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tu(vfloat32m1_t vd, + vfloat16mf4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f16mf4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tu(vfloat32m1_t vd, + vfloat16mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f16mf2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tu(vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tu(vfloat32m1_t vd, + vfloat16m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f16m1_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tu(vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tu(vfloat32m1_t vd, + vfloat16m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f16m2_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tu(vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tu(vfloat32m1_t vd, + vfloat16m4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f16m4_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tu(vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tu(vfloat32m1_t vd, + vfloat16m8_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f16m8_f32m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tu(vfloat64m1_t vd, + vfloat32mf2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f32mf2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tu(vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tu(vfloat64m1_t vd, + vfloat32m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f32m1_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tu(vfloat64m1_t
vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tu(vfloat64m1_t vd, + vfloat32m2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f32m2_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tu(vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tu(vfloat64m1_t vd, + vfloat32m4_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f32m4_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tu(vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tu(vfloat64m1_t vd, + vfloat32m8_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f32m8_f64m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_tum(vbool64_t vm, vfloat32m1_t vd, + vfloat16mf4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f16mf4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_tum(vbool16_t vm, vfloat32m1_t vd, + vfloat16m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f16m1_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_tum(vbool8_t vm, vfloat32m1_t vd, + vfloat16m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f16m2_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_tum(vbool4_t vm, vfloat32m1_t vd, + vfloat16m4_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f16m4_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tum(vbool2_t vm, vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_tum(vbool2_t vm, vfloat32m1_t vd, + vfloat16m8_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f16m8_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f32mf2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_tum(vbool32_t vm, vfloat64m1_t vd, + vfloat32m1_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f32m1_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tum(vbool16_t 
vm, vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_tum(vbool16_t vm, vfloat64m1_t vd, + vfloat32m2_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f32m2_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_tum(vbool8_t vm, vfloat64m1_t vd, + vfloat32m4_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f32m4_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tum(vbool4_t vm, vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_tum(vbool4_t vm, vfloat64m1_t vd, + vfloat32m8_t vs2, + vfloat64m1_t vs1, size_t vl) { return __riscv_vfwredusum_vs_f32m8_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f16mf4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_tu(vfloat32m1_t vd, + vfloat16mf4_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f16mf4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat16mf2_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f16m1_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_tu(vfloat32m1_t vd, + vfloat16m1_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m1_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f16m2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat16m2_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f16m4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_tu(vfloat32m1_t vd, + vfloat16m4_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m4_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f16m8_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_tu(vfloat32m1_t vd, + vfloat16m8_t vs2, + vfloat32m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f16m8_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { - return 
__riscv_vfwredusum_vs_f32mf2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_tu(vfloat64m1_t vd, + vfloat32mf2_t vs2, + vfloat64m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f32mf2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f32m1_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_tu(vfloat64m1_t vd, + vfloat32m1_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m1_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f32m2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_tu(vfloat64m1_t vd, + vfloat32m2_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m2_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f32m4_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_tu(vfloat64m1_t vd, + vfloat32m4_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m4_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f32m8_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_tu(vfloat64m1_t vd, + vfloat32m8_t vs2, + vfloat64m1_t vs1, size_t vl) { + return __riscv_vfwredusum_vs_f32m8_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_tum(vbool64_t vm, vfloat32m1_t vd, vfloat16mf4_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f16mf4_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_rm_tum(vbool64_t vm, + vfloat32m1_t vd, + vfloat16mf4_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f16mf4_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, + vfloat16mf2_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_tum(vbool16_t vm, vfloat32m1_t vd, vfloat16m1_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f16m1_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_rm_tum(vbool16_t vm, + vfloat32m1_t vd, + vfloat16m1_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f16m1_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, vfloat16m2_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f16m2_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t 
test_vfwredusum_vs_f16m2_f32m1_rm_tum(vbool8_t vm, vfloat32m1_t vd, + vfloat16m2_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f16m2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, vfloat16m4_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f16m4_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_rm_tum(vbool4_t vm, vfloat32m1_t vd, + vfloat16m4_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f16m4_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_tum(vbool2_t vm, vfloat32m1_t vd, vfloat16m8_t vs2, vfloat32m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f16m8_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_rm_tum(vbool2_t vm, vfloat32m1_t vd, + vfloat16m8_t vs2, + vfloat32m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f16m8_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_tum(vbool64_t vm, + vfloat64m1_t vd, + vfloat32mf2_t vs2, + vfloat64m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f32mf2_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_tum(vbool32_t vm, + vfloat64m1_t vd, + vfloat32m1_t vs2, + vfloat64m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f32m1_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_tum(vbool16_t vm, + vfloat64m1_t vd, + vfloat32m2_t vs2, + vfloat64m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f32m2_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f32mf2_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); -} - -vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_rm_tum(vbool32_t vm, vfloat64m1_t vd, vfloat32m1_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f32m1_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); -} - -vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_rm_tum(vbool16_t vm, vfloat64m1_t vd, vfloat32m2_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f32m2_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); -} - -vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, vfloat32m4_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f32m4_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); -} - -vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_tum(vbool4_t vm, vfloat64m1_t vd, vfloat32m8_t vs2, vfloat64m1_t vs1, size_t vl) { - return __riscv_vfwredusum_vs_f32m8_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_rm_tum(vbool8_t vm, vfloat64m1_t vd, + vfloat32m4_t vs2, + vfloat64m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f32m4_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_rm_tum(vbool4_t vm, vfloat64m1_t vd, + vfloat32m8_t vs2, + vfloat64m1_t vs1, + size_t vl) { + return __riscv_vfwredusum_vs_f32m8_f64m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vfwsub.c b/auto-generated/policy_funcs/llvm-api-tests/vfwsub.c 
index a192eae08..15f01a51d 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vfwsub.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vfwsub.c @@ -1,1159 +1,1644 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2_t test_vfwsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfwsub_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vf_f32mf2_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_vf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfwsub_wv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wf_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_wf_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_vv_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfwsub_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_vf_f32m1_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_vf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_wv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfwsub_wv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_wf_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_wf_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_vv_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfwsub_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_vf_f32m2_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_vf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_wv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfwsub_wv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t
test_vfwsub_wf_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_wf_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_vv_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfwsub_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_vf_f32m4_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_vf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_wv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfwsub_wv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_wf_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_wf_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_vv_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfwsub_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_vf_f32m8_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_vf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_wv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfwsub_wv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_wf_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_wf_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_vv_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfwsub_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_vf_f64m1_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_wv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfwsub_wv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_wf_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_vv_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwsub_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t 
test_vfwsub_vf_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_vf_f64m2_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_wv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwsub_wv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_wf_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_vv_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfwsub_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_vf_f64m4_tu(vfloat64m4_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_wv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfwsub_wv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_wf_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_vv_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfwsub_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_vf_f64m8_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_wv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfwsub_wv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_wf_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) 
{ +vfloat32mf2_t test_vfwsub_wv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wf_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_vf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_wv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_wf_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_vf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_wv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_wf_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_vf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, 
vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_wv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_wf_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_vf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_wv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_wf_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_vf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_wv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_wf_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_vf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, 
vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_wv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_wf_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_vf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_wv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_wf_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_vf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_wv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_wf_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t 
vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wf_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_vf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_wv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_wf_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_vf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_wv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_wf_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_vf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m4_tumu(vm, vd, vs2, 
rs1, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_wv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_wf_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_vf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_wv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_wf_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_vf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_wv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_wf_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_vf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return 
__riscv_vfwsub_vf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_wv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_wf_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_vf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_wv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_wf_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_vf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_wv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_wf_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + 
size_t vl) { return __riscv_vfwsub_vf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wf_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_vf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_wv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_wf_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_vf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_wv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_wf_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_vf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, 
_Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_wv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_wf_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_vf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_wv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_wf_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_vf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_wv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_wf_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_vf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return 
__riscv_vfwsub_vf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_wv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_wf_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_vf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_wv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_wf_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_vf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_wv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_wf_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfwsub_vv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat16mf4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_vf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } 
-vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat16mf4_t vs1, size_t vl) { return __riscv_vfwsub_wv_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_wf_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_vv_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfwsub_vv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_vf_f32m1_rm_tu(vfloat32m1_t vd, vfloat16mf2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_vf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_wv_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat16mf2_t vs1, size_t vl) { return __riscv_vfwsub_wv_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_wf_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_wf_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_vv_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfwsub_vv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_vf_f32m2_rm_tu(vfloat32m2_t vd, vfloat16m1_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_vf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_wv_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat16m1_t vs1, size_t vl) { return __riscv_vfwsub_wv_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_wf_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_wf_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_vv_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfwsub_vv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_vf_f32m4_rm_tu(vfloat32m4_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_vf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, 
vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_wv_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vfwsub_wv_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_wf_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_wf_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_vv_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfwsub_vv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_vf_f32m8_rm_tu(vfloat32m8_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_vf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_wv_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vfwsub_wv_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_wf_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vfwsub_wf_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_vv_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfwsub_vv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_vf_f64m1_rm_tu(vfloat64m1_t vd, vfloat32mf2_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_wv_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat32mf2_t vs1, size_t vl) { return __riscv_vfwsub_wv_f64m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_wf_f64m1_rm_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_vv_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, size_t vl) { return __riscv_vfwsub_vv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_vf_f64m2_rm_tu(vfloat64m2_t vd, vfloat32m1_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_wv_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + 
vfloat32m1_t vs1, size_t vl) { return __riscv_vfwsub_wv_f64m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_wf_f64m2_rm_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_vv_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfwsub_vv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_vf_f64m4_rm_tu(vfloat64m4_t vd, vfloat32m2_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_wv_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat32m2_t vs1, size_t vl) { return __riscv_vfwsub_wv_f64m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_wf_f64m4_rm_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_vv_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfwsub_vv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_vf_f64m8_rm_tu(vfloat64m8_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_vf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_wv_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vfwsub_wv_f64m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_wf_f64m8_rm_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + float rs1, size_t vl) { return __riscv_vfwsub_wf_f64m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, 
vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32mf2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_vv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_vf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_wv_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_wf_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_vv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_vf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_wv_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_wf_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_vv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_vf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t 
vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_wv_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_wf_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_vv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_vf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_wv_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_wf_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_vv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_vf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_wv_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m1_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_wf_f64m1_rm_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_wf_f64m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_vv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + 
vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_vf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_wv_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m2_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_wf_f64m2_rm_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_wf_f64m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_vv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_vf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_wv_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m4_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_wf_f64m4_rm_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_wf_f64m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_vv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_vf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_wv_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m8_rm_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_wf_f64m8_rm_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, float rs1, + 
size_t vl) { return __riscv_vfwsub_wf_f64m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { - return __riscv_vfwsub_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, + vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { - return __riscv_vfwsub_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { - return __riscv_vfwsub_wv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_f32mf2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { - return __riscv_vfwsub_wf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, _Float16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_f32mf2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_vv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_vf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_wv_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_wf_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_vv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { 
+vfloat32m2_t test_vfwsub_vf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_wv_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_wf_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_vv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_vf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_wv_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_wf_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_vv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_vf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_wv_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_wf_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, 
vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_vv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_vf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_wv_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m1_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_wf_f64m1_rm_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_wf_f64m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_vv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_vf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_wv_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m2_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_wf_f64m2_rm_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_wf_f64m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_vv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_vf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_wv_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m4_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm_tumu(vbool16_t vm, 
vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_wf_f64m4_rm_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_wf_f64m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_vv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_vf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_wv_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m8_rm_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_wf_f64m8_rm_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_wf_f64m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_vf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wv_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32mf2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32mf2_t test_vfwsub_wf_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_vv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_vf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t 
test_vfwsub_wv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vfloat16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vfwsub_wv_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m1_t test_vfwsub_wf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m1_t test_vfwsub_wf_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_vv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_vf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vfloat16m1_t vs1, size_t vl) { +vfloat32m2_t test_vfwsub_wv_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m2_t test_vfwsub_wf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m2_t test_vfwsub_wf_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_vv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_vf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat16m2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vfloat16m2_t vs1, size_t vl) { +vfloat32m4_t test_vfwsub_wv_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vfloat16m2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m4_t test_vfwsub_wf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m4_t test_vfwsub_wf_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_vv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t 
test_vfwsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_vf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat16m4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_vf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vfloat16m4_t vs1, size_t vl) { +vfloat32m8_t test_vfwsub_wv_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vfloat16m4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat32m8_t test_vfwsub_wf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, _Float16 rs1, size_t vl) { +vfloat32m8_t test_vfwsub_wf_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vfwsub_wf_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_vv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_vf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat32mf2_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vfloat32mf2_t vs1, size_t vl) { +vfloat64m1_t test_vfwsub_wv_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m1_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m1_t test_vfwsub_wf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, float rs1, size_t vl) { +vfloat64m1_t test_vfwsub_wf_f64m1_rm_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_wf_f64m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_vv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_vf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat32m1_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vfloat32m1_t vs1, size_t vl) { +vfloat64m2_t test_vfwsub_wv_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m2_t test_vfwsub_wf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, float rs1, size_t vl) { +vfloat64m2_t test_vfwsub_wf_f64m2_rm_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_wf_f64m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vv_f64m4_rm_mu(vbool16_t vm, 
vfloat64m4_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_vv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_vf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat32m2_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vfloat32m2_t vs1, size_t vl) { +vfloat64m4_t test_vfwsub_wv_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m4_t test_vfwsub_wf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, float rs1, size_t vl) { +vfloat64m4_t test_vfwsub_wf_f64m4_rm_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_wf_f64m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_vv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwsub_vv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_vf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat32m4_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_vf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vfloat32m4_t vs1, size_t vl) { +vfloat64m8_t test_vfwsub_wv_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vfloat32m4_t vs1, + size_t vl) { return __riscv_vfwsub_wv_f64m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); } -vfloat64m8_t test_vfwsub_wf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, float rs1, size_t vl) { +vfloat64m8_t test_vfwsub_wf_f64m8_rm_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, float rs1, + size_t vl) { return __riscv_vfwsub_wf_f64m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/viota.c b/auto-generated/policy_funcs/llvm-api-tests/viota.c index 4d6754430..77b9da2bd 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/viota.c +++ b/auto-generated/policy_funcs/llvm-api-tests/viota.c @@ -93,266 +93,332 @@ vuint64m8_t test_viota_m_u64m8_tu(vuint64m8_t vd, vbool8_t vs2, size_t vl) { return __riscv_viota_m_u64m8_tu(vd, vs2, vl); } -vuint8mf8_t test_viota_m_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vbool64_t vs2, size_t vl) { +vuint8mf8_t test_viota_m_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vbool64_t vs2, + size_t vl) { return __riscv_viota_m_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_viota_m_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vbool32_t vs2, size_t vl) { +vuint8mf4_t test_viota_m_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vbool32_t vs2, + size_t vl) { return __riscv_viota_m_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_viota_m_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vbool16_t vs2, size_t vl) { +vuint8mf2_t test_viota_m_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vbool16_t vs2, 
+ size_t vl) { return __riscv_viota_m_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_viota_m_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vbool8_t vs2, size_t vl) { +vuint8m1_t test_viota_m_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vbool8_t vs2, + size_t vl) { return __riscv_viota_m_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_viota_m_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vbool4_t vs2, size_t vl) { +vuint8m2_t test_viota_m_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vbool4_t vs2, + size_t vl) { return __riscv_viota_m_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_viota_m_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vbool2_t vs2, size_t vl) { +vuint8m4_t test_viota_m_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vbool2_t vs2, + size_t vl) { return __riscv_viota_m_u8m4_tum(vm, vd, vs2, vl); } -vuint8m8_t test_viota_m_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vbool1_t vs2, size_t vl) { +vuint8m8_t test_viota_m_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vbool1_t vs2, + size_t vl) { return __riscv_viota_m_u8m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_viota_m_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vbool64_t vs2, size_t vl) { +vuint16mf4_t test_viota_m_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vbool64_t vs2, size_t vl) { return __riscv_viota_m_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_viota_m_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vbool32_t vs2, size_t vl) { +vuint16mf2_t test_viota_m_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vbool32_t vs2, size_t vl) { return __riscv_viota_m_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_viota_m_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vbool16_t vs2, size_t vl) { +vuint16m1_t test_viota_m_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vbool16_t vs2, + size_t vl) { return __riscv_viota_m_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_viota_m_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vbool8_t vs2, size_t vl) { +vuint16m2_t test_viota_m_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vbool8_t vs2, + size_t vl) { return __riscv_viota_m_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_viota_m_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vbool4_t vs2, size_t vl) { +vuint16m4_t test_viota_m_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vbool4_t vs2, + size_t vl) { return __riscv_viota_m_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_viota_m_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vbool2_t vs2, size_t vl) { +vuint16m8_t test_viota_m_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vbool2_t vs2, + size_t vl) { return __riscv_viota_m_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_viota_m_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vbool64_t vs2, size_t vl) { +vuint32mf2_t test_viota_m_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vbool64_t vs2, size_t vl) { return __riscv_viota_m_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_viota_m_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vbool32_t vs2, size_t vl) { +vuint32m1_t test_viota_m_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vbool32_t vs2, + size_t vl) { return __riscv_viota_m_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_viota_m_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vbool16_t vs2, size_t vl) { +vuint32m2_t test_viota_m_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vbool16_t vs2, + size_t vl) { return __riscv_viota_m_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_viota_m_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vbool8_t vs2, size_t vl) { +vuint32m4_t test_viota_m_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vbool8_t vs2, + size_t vl) { return __riscv_viota_m_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_viota_m_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vbool4_t vs2, size_t vl) { +vuint32m8_t 
test_viota_m_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vbool4_t vs2, + size_t vl) { return __riscv_viota_m_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_viota_m_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vbool64_t vs2, size_t vl) { +vuint64m1_t test_viota_m_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vbool64_t vs2, + size_t vl) { return __riscv_viota_m_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_viota_m_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vbool32_t vs2, size_t vl) { +vuint64m2_t test_viota_m_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vbool32_t vs2, + size_t vl) { return __riscv_viota_m_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_viota_m_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vbool16_t vs2, size_t vl) { +vuint64m4_t test_viota_m_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vbool16_t vs2, + size_t vl) { return __riscv_viota_m_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_viota_m_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vbool8_t vs2, size_t vl) { +vuint64m8_t test_viota_m_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vbool8_t vs2, + size_t vl) { return __riscv_viota_m_u64m8_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_viota_m_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vbool64_t vs2, size_t vl) { +vuint8mf8_t test_viota_m_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vbool64_t vs2, + size_t vl) { return __riscv_viota_m_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t test_viota_m_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vbool32_t vs2, size_t vl) { +vuint8mf4_t test_viota_m_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vbool32_t vs2, + size_t vl) { return __riscv_viota_m_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_viota_m_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vbool16_t vs2, size_t vl) { +vuint8mf2_t test_viota_m_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vbool16_t vs2, + size_t vl) { return __riscv_viota_m_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_viota_m_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vbool8_t vs2, size_t vl) { +vuint8m1_t test_viota_m_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vbool8_t vs2, + size_t vl) { return __riscv_viota_m_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_viota_m_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vbool4_t vs2, size_t vl) { +vuint8m2_t test_viota_m_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vbool4_t vs2, + size_t vl) { return __riscv_viota_m_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_viota_m_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vbool2_t vs2, size_t vl) { +vuint8m4_t test_viota_m_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vbool2_t vs2, + size_t vl) { return __riscv_viota_m_u8m4_tumu(vm, vd, vs2, vl); } -vuint8m8_t test_viota_m_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vbool1_t vs2, size_t vl) { +vuint8m8_t test_viota_m_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vbool1_t vs2, + size_t vl) { return __riscv_viota_m_u8m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_viota_m_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vbool64_t vs2, size_t vl) { +vuint16mf4_t test_viota_m_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vbool64_t vs2, size_t vl) { return __riscv_viota_m_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_viota_m_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vbool32_t vs2, size_t vl) { +vuint16mf2_t test_viota_m_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vbool32_t vs2, size_t vl) { return __riscv_viota_m_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_viota_m_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vbool16_t vs2, size_t vl) { +vuint16m1_t test_viota_m_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vbool16_t vs2, + size_t vl) { return __riscv_viota_m_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t 
test_viota_m_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vbool8_t vs2, size_t vl) { +vuint16m2_t test_viota_m_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vbool8_t vs2, + size_t vl) { return __riscv_viota_m_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_viota_m_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vbool4_t vs2, size_t vl) { +vuint16m4_t test_viota_m_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vbool4_t vs2, + size_t vl) { return __riscv_viota_m_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_viota_m_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vbool2_t vs2, size_t vl) { +vuint16m8_t test_viota_m_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vbool2_t vs2, + size_t vl) { return __riscv_viota_m_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vbool64_t vs2, size_t vl) { +vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vbool64_t vs2, size_t vl) { return __riscv_viota_m_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_viota_m_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vbool32_t vs2, size_t vl) { +vuint32m1_t test_viota_m_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vbool32_t vs2, + size_t vl) { return __riscv_viota_m_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_viota_m_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vbool16_t vs2, size_t vl) { +vuint32m2_t test_viota_m_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vbool16_t vs2, + size_t vl) { return __riscv_viota_m_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_viota_m_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vbool8_t vs2, size_t vl) { +vuint32m4_t test_viota_m_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vbool8_t vs2, + size_t vl) { return __riscv_viota_m_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_viota_m_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vbool4_t vs2, size_t vl) { +vuint32m8_t test_viota_m_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vbool4_t vs2, + size_t vl) { return __riscv_viota_m_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_viota_m_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vbool64_t vs2, size_t vl) { +vuint64m1_t test_viota_m_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vbool64_t vs2, + size_t vl) { return __riscv_viota_m_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_viota_m_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vbool32_t vs2, size_t vl) { +vuint64m2_t test_viota_m_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vbool32_t vs2, + size_t vl) { return __riscv_viota_m_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_viota_m_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vbool16_t vs2, size_t vl) { +vuint64m4_t test_viota_m_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vbool16_t vs2, + size_t vl) { return __riscv_viota_m_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_viota_m_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vbool8_t vs2, size_t vl) { +vuint64m8_t test_viota_m_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vbool8_t vs2, + size_t vl) { return __riscv_viota_m_u64m8_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_viota_m_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vbool64_t vs2, size_t vl) { +vuint8mf8_t test_viota_m_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vbool64_t vs2, + size_t vl) { return __riscv_viota_m_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_viota_m_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vbool32_t vs2, size_t vl) { +vuint8mf4_t test_viota_m_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vbool32_t vs2, + size_t vl) { return __riscv_viota_m_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_viota_m_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vbool16_t vs2, size_t vl) { +vuint8mf2_t test_viota_m_u8mf2_mu(vbool16_t vm, 
vuint8mf2_t vd, vbool16_t vs2, + size_t vl) { return __riscv_viota_m_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_viota_m_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vbool8_t vs2, size_t vl) { +vuint8m1_t test_viota_m_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vbool8_t vs2, + size_t vl) { return __riscv_viota_m_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_viota_m_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vbool4_t vs2, size_t vl) { +vuint8m2_t test_viota_m_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vbool4_t vs2, + size_t vl) { return __riscv_viota_m_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_viota_m_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vbool2_t vs2, size_t vl) { +vuint8m4_t test_viota_m_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vbool2_t vs2, + size_t vl) { return __riscv_viota_m_u8m4_mu(vm, vd, vs2, vl); } -vuint8m8_t test_viota_m_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vbool1_t vs2, size_t vl) { +vuint8m8_t test_viota_m_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vbool1_t vs2, + size_t vl) { return __riscv_viota_m_u8m8_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_viota_m_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vbool64_t vs2, size_t vl) { +vuint16mf4_t test_viota_m_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vbool64_t vs2, size_t vl) { return __riscv_viota_m_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_viota_m_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vbool32_t vs2, size_t vl) { +vuint16mf2_t test_viota_m_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vbool32_t vs2, size_t vl) { return __riscv_viota_m_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_viota_m_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vbool16_t vs2, size_t vl) { +vuint16m1_t test_viota_m_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vbool16_t vs2, + size_t vl) { return __riscv_viota_m_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_viota_m_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vbool8_t vs2, size_t vl) { +vuint16m2_t test_viota_m_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vbool8_t vs2, + size_t vl) { return __riscv_viota_m_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_viota_m_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vbool4_t vs2, size_t vl) { +vuint16m4_t test_viota_m_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vbool4_t vs2, + size_t vl) { return __riscv_viota_m_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_viota_m_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vbool2_t vs2, size_t vl) { +vuint16m8_t test_viota_m_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vbool2_t vs2, + size_t vl) { return __riscv_viota_m_u16m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_viota_m_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vbool64_t vs2, size_t vl) { +vuint32mf2_t test_viota_m_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vbool64_t vs2, size_t vl) { return __riscv_viota_m_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_viota_m_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vbool32_t vs2, size_t vl) { +vuint32m1_t test_viota_m_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vbool32_t vs2, + size_t vl) { return __riscv_viota_m_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_viota_m_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vbool16_t vs2, size_t vl) { +vuint32m2_t test_viota_m_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vbool16_t vs2, + size_t vl) { return __riscv_viota_m_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_viota_m_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vbool8_t vs2, size_t vl) { +vuint32m4_t test_viota_m_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vbool8_t vs2, + size_t vl) { return __riscv_viota_m_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_viota_m_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vbool4_t vs2, size_t vl) { +vuint32m8_t test_viota_m_u32m8_mu(vbool4_t 
vm, vuint32m8_t vd, vbool4_t vs2, + size_t vl) { return __riscv_viota_m_u32m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_viota_m_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vbool64_t vs2, size_t vl) { +vuint64m1_t test_viota_m_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vbool64_t vs2, + size_t vl) { return __riscv_viota_m_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_viota_m_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vbool32_t vs2, size_t vl) { +vuint64m2_t test_viota_m_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vbool32_t vs2, + size_t vl) { return __riscv_viota_m_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_viota_m_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vbool16_t vs2, size_t vl) { +vuint64m4_t test_viota_m_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vbool16_t vs2, + size_t vl) { return __riscv_viota_m_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t test_viota_m_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vbool8_t vs2, size_t vl) { +vuint64m8_t test_viota_m_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vbool8_t vs2, + size_t vl) { return __riscv_viota_m_u64m8_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vle16.c b/auto-generated/policy_funcs/llvm-api-tests/vle16.c index 1a4f1b364..8f36eb6e7 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vle16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vle16.c @@ -1,40 +1,48 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vle16_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4_t test_vle16_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, + size_t vl) { return __riscv_vle16_v_f16mf4_tu(vd, rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2_t test_vle16_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, + size_t vl) { return __riscv_vle16_v_f16mf2_tu(vd, rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1_t test_vle16_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, + size_t vl) { return __riscv_vle16_v_f16m1_tu(vd, rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2_t test_vle16_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, + size_t vl) { return __riscv_vle16_v_f16m2_tu(vd, rs1, vl); } -vfloat16m4_t test_vle16_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m4_t test_vle16_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, + size_t vl) { return __riscv_vle16_v_f16m4_tu(vd, rs1, vl); } -vfloat16m8_t test_vle16_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m8_t test_vle16_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, + size_t vl) { return __riscv_vle16_v_f16m8_tu(vd, rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, size_t vl) { +vint16mf4_t test_vle16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vle16_v_i16mf4_tu(vd, rs1, vl); } -vint16mf2_t test_vle16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, size_t vl) { +vint16mf2_t test_vle16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vle16_v_i16mf2_tu(vd, rs1, vl); } @@ -54,242 +62,302 @@ vint16m8_t test_vle16_v_i16m8_tu(vint16m8_t vd,
const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16m8_tu(vd, rs1, vl); } -vuint16mf4_t test_vle16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4_t test_vle16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vle16_v_u16mf4_tu(vd, rs1, vl); } -vuint16mf2_t test_vle16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2_t test_vle16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vle16_v_u16mf2_tu(vd, rs1, vl); } -vuint16m1_t test_vle16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1_t test_vle16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vle16_v_u16m1_tu(vd, rs1, vl); } -vuint16m2_t test_vle16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2_t test_vle16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vle16_v_u16m2_tu(vd, rs1, vl); } -vuint16m4_t test_vle16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, size_t vl) { +vuint16m4_t test_vle16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vle16_v_u16m4_tu(vd, rs1, vl); } -vuint16m8_t test_vle16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, size_t vl) { +vuint16m8_t test_vle16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vle16_v_u16m8_tu(vd, rs1, vl); } -vfloat16mf4_t test_vle16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4_t test_vle16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16mf4_tum(vm, vd, rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2_t test_vle16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16mf2_tum(vm, vd, rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1_t test_vle16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16m1_tum(vm, vd, rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2_t test_vle16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16m2_tum(vm, vd, rs1, vl); } -vfloat16m4_t test_vle16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m4_t test_vle16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16m4_tum(vm, vd, rs1, vl); } -vfloat16m8_t test_vle16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m8_t test_vle16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16m8_tum(vm, vd, rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t vl) { +vint16mf4_t test_vle16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16mf4_tum(vm, vd, rs1, vl); } -vint16mf2_t test_vle16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t vl) { +vint16mf2_t test_vle16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16mf2_tum(vm, vd, rs1, vl); } -vint16m1_t test_vle16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, 
const int16_t *rs1, size_t vl) { +vint16m1_t test_vle16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16m1_tum(vm, vd, rs1, vl); } -vint16m2_t test_vle16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t vl) { +vint16m2_t test_vle16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16m2_tum(vm, vd, rs1, vl); } -vint16m4_t test_vle16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t vl) { +vint16m4_t test_vle16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16m4_tum(vm, vd, rs1, vl); } -vint16m8_t test_vle16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t vl) { +vint16m8_t test_vle16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16m8_tum(vm, vd, rs1, vl); } -vuint16mf4_t test_vle16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4_t test_vle16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16mf4_tum(vm, vd, rs1, vl); } -vuint16mf2_t test_vle16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2_t test_vle16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16mf2_tum(vm, vd, rs1, vl); } -vuint16m1_t test_vle16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1_t test_vle16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16m1_tum(vm, vd, rs1, vl); } -vuint16m2_t test_vle16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2_t test_vle16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16m2_tum(vm, vd, rs1, vl); } -vuint16m4_t test_vle16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t vl) { +vuint16m4_t test_vle16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16m4_tum(vm, vd, rs1, vl); } -vuint16m8_t test_vle16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t vl) { +vuint16m8_t test_vle16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16m8_tum(vm, vd, rs1, vl); } -vfloat16mf4_t test_vle16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4_t test_vle16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16mf4_tumu(vm, vd, rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2_t test_vle16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16mf2_tumu(vm, vd, rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1_t test_vle16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16m1_tumu(vm, vd, rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2_t test_vle16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16m2_tumu(vm, vd, rs1, vl); } -vfloat16m4_t 
test_vle16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m4_t test_vle16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16m4_tumu(vm, vd, rs1, vl); } -vfloat16m8_t test_vle16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m8_t test_vle16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16m8_tumu(vm, vd, rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t vl) { +vint16mf4_t test_vle16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16mf4_tumu(vm, vd, rs1, vl); } -vint16mf2_t test_vle16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t vl) { +vint16mf2_t test_vle16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16mf2_tumu(vm, vd, rs1, vl); } -vint16m1_t test_vle16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t vl) { +vint16m1_t test_vle16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16m1_tumu(vm, vd, rs1, vl); } -vint16m2_t test_vle16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t vl) { +vint16m2_t test_vle16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16m2_tumu(vm, vd, rs1, vl); } -vint16m4_t test_vle16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t vl) { +vint16m4_t test_vle16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16m4_tumu(vm, vd, rs1, vl); } -vint16m8_t test_vle16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t vl) { +vint16m8_t test_vle16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16m8_tumu(vm, vd, rs1, vl); } -vuint16mf4_t test_vle16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4_t test_vle16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16mf4_tumu(vm, vd, rs1, vl); } -vuint16mf2_t test_vle16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2_t test_vle16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16mf2_tumu(vm, vd, rs1, vl); } -vuint16m1_t test_vle16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1_t test_vle16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16m1_tumu(vm, vd, rs1, vl); } -vuint16m2_t test_vle16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2_t test_vle16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16m2_tumu(vm, vd, rs1, vl); } -vuint16m4_t test_vle16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t vl) { +vuint16m4_t test_vle16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16m4_tumu(vm, vd, rs1, vl); } -vuint16m8_t test_vle16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t vl) { +vuint16m8_t test_vle16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, size_t vl) { return 
__riscv_vle16_v_u16m8_tumu(vm, vd, rs1, vl); } -vfloat16mf4_t test_vle16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4_t test_vle16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16mf4_mu(vm, vd, rs1, vl); } -vfloat16mf2_t test_vle16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2_t test_vle16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16mf2_mu(vm, vd, rs1, vl); } -vfloat16m1_t test_vle16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1_t test_vle16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16m1_mu(vm, vd, rs1, vl); } -vfloat16m2_t test_vle16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2_t test_vle16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16m2_mu(vm, vd, rs1, vl); } -vfloat16m4_t test_vle16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m4_t test_vle16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16m4_mu(vm, vd, rs1, vl); } -vfloat16m8_t test_vle16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m8_t test_vle16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vle16_v_f16m8_mu(vm, vd, rs1, vl); } -vint16mf4_t test_vle16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t vl) { +vint16mf4_t test_vle16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16mf4_mu(vm, vd, rs1, vl); } -vint16mf2_t test_vle16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t vl) { +vint16mf2_t test_vle16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16mf2_mu(vm, vd, rs1, vl); } -vint16m1_t test_vle16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t vl) { +vint16m1_t test_vle16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vle16_v_i16m1_mu(vm, vd, rs1, vl); } -vint16m2_t test_vle16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t vl) { +vint16m2_t test_vle16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vle16_v_i16m2_mu(vm, vd, rs1, vl); } -vint16m4_t test_vle16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t vl) { +vint16m4_t test_vle16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vle16_v_i16m4_mu(vm, vd, rs1, vl); } -vint16m8_t test_vle16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t vl) { +vint16m8_t test_vle16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vle16_v_i16m8_mu(vm, vd, rs1, vl); } -vuint16mf4_t test_vle16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4_t test_vle16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16mf4_mu(vm, vd, rs1, vl); } -vuint16mf2_t test_vle16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2_t test_vle16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, size_t vl) { return 
__riscv_vle16_v_u16mf2_mu(vm, vd, rs1, vl); } -vuint16m1_t test_vle16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1_t test_vle16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16m1_mu(vm, vd, rs1, vl); } -vuint16m2_t test_vle16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2_t test_vle16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16m2_mu(vm, vd, rs1, vl); } -vuint16m4_t test_vle16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t vl) { +vuint16m4_t test_vle16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16m4_mu(vm, vd, rs1, vl); } -vuint16m8_t test_vle16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t vl) { +vuint16m8_t test_vle16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vle16_v_u16m8_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vle16ff.c b/auto-generated/policy_funcs/llvm-api-tests/vle16ff.c index 062c926aa..77687ce39 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vle16ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vle16ff.c @@ -1,295 +1,421 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vle16ff_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4_t test_vle16ff_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_f16mf4_tu(vd, rs1, new_vl, vl); } -vfloat16mf2_t test_vle16ff_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2_t test_vle16ff_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_f16mf2_tu(vd, rs1, new_vl, vl); } -vfloat16m1_t test_vle16ff_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1_t test_vle16ff_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_f16m1_tu(vd, rs1, new_vl, vl); } -vfloat16m2_t test_vle16ff_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m2_t test_vle16ff_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_f16m2_tu(vd, rs1, new_vl, vl); } -vfloat16m4_t test_vle16ff_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m4_t test_vle16ff_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_f16m4_tu(vd, rs1, new_vl, vl); } -vfloat16m8_t test_vle16ff_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m8_t test_vle16ff_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_f16m8_tu(vd, rs1, new_vl, vl); } -vint16mf4_t test_vle16ff_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4_t test_vle16ff_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return
__riscv_vle16ff_v_i16mf4_tu(vd, rs1, new_vl, vl); } -vint16mf2_t test_vle16ff_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2_t test_vle16ff_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_i16mf2_tu(vd, rs1, new_vl, vl); } -vint16m1_t test_vle16ff_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1_t test_vle16ff_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_i16m1_tu(vd, rs1, new_vl, vl); } -vint16m2_t test_vle16ff_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2_t test_vle16ff_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_i16m2_tu(vd, rs1, new_vl, vl); } -vint16m4_t test_vle16ff_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m4_t test_vle16ff_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_i16m4_tu(vd, rs1, new_vl, vl); } -vint16m8_t test_vle16ff_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m8_t test_vle16ff_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_i16m8_tu(vd, rs1, new_vl, vl); } -vuint16mf4_t test_vle16ff_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4_t test_vle16ff_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_u16mf4_tu(vd, rs1, new_vl, vl); } -vuint16mf2_t test_vle16ff_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2_t test_vle16ff_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_u16mf2_tu(vd, rs1, new_vl, vl); } -vuint16m1_t test_vle16ff_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1_t test_vle16ff_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_u16m1_tu(vd, rs1, new_vl, vl); } -vuint16m2_t test_vle16ff_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2_t test_vle16ff_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_u16m2_tu(vd, rs1, new_vl, vl); } -vuint16m4_t test_vle16ff_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m4_t test_vle16ff_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_u16m4_tu(vd, rs1, new_vl, vl); } -vuint16m8_t test_vle16ff_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m8_t test_vle16ff_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle16ff_v_u16m8_tu(vd, rs1, new_vl, vl); } -vfloat16mf4_t test_vle16ff_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4_t test_vle16ff_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle16ff_v_f16mf4_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2_t test_vle16ff_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2_t test_vle16ff_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, size_t *new_vl, + size_t vl) { return 
__riscv_vle16ff_v_f16mf2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m1_t test_vle16ff_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16m1_t test_vle16ff_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16m1_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m2_t test_vle16ff_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16m2_t test_vle16ff_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16m2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m4_t test_vle16ff_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16m4_t test_vle16ff_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16m4_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m8_t test_vle16ff_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16m8_t test_vle16ff_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16m8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint16mf4_t test_vle16ff_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16mf4_t test_vle16ff_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16mf4_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint16mf2_t test_vle16ff_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16mf2_t test_vle16ff_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16mf2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m1_t test_vle16ff_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16m1_t test_vle16ff_v_i16m1_tum(vbool16_t vm, vint16m1_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16m1_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m2_t test_vle16ff_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16m2_t test_vle16ff_v_i16m2_tum(vbool8_t vm, vint16m2_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16m2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m4_t test_vle16ff_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16m4_t test_vle16ff_v_i16m4_tum(vbool4_t vm, vint16m4_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16m4_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m8_t test_vle16ff_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16m8_t test_vle16ff_v_i16m8_tum(vbool2_t vm, vint16m8_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16m8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16mf4_t test_vle16ff_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16mf4_t test_vle16ff_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16mf4_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16mf2_t test_vle16ff_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16mf2_t test_vle16ff_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16mf2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m1_t test_vle16ff_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16m1_t test_vle16ff_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16m1_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m2_t test_vle16ff_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16m2_t test_vle16ff_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16m2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m4_t test_vle16ff_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16m4_t test_vle16ff_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16m4_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m8_t test_vle16ff_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16m8_t test_vle16ff_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16m8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16mf4_t test_vle16ff_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16mf4_t test_vle16ff_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16mf4_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16mf2_t test_vle16ff_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16mf2_t test_vle16ff_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16mf2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m1_t test_vle16ff_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16m1_t test_vle16ff_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16m1_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m2_t test_vle16ff_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16m2_t test_vle16ff_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16m2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m4_t test_vle16ff_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16m4_t test_vle16ff_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16m4_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m8_t test_vle16ff_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16m8_t test_vle16ff_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16m8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16mf4_t test_vle16ff_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16mf4_t test_vle16ff_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16mf4_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16mf2_t test_vle16ff_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16mf2_t test_vle16ff_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16mf2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m1_t test_vle16ff_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16m1_t test_vle16ff_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16m1_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m2_t test_vle16ff_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16m2_t test_vle16ff_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16m2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m4_t test_vle16ff_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16m4_t test_vle16ff_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16m4_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m8_t test_vle16ff_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16m8_t test_vle16ff_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16m8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16mf4_t test_vle16ff_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16mf4_t test_vle16ff_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16mf4_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16mf2_t test_vle16ff_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16mf2_t test_vle16ff_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16mf2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m1_t test_vle16ff_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16m1_t test_vle16ff_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16m1_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m2_t test_vle16ff_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16m2_t test_vle16ff_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16m2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m4_t test_vle16ff_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16m4_t test_vle16ff_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16m4_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m8_t test_vle16ff_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16m8_t test_vle16ff_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16m8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16mf4_t test_vle16ff_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16mf4_t test_vle16ff_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16mf4_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16mf2_t test_vle16ff_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16mf2_t test_vle16ff_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16mf2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m1_t test_vle16ff_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16m1_t test_vle16ff_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16m1_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m2_t test_vle16ff_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16m2_t test_vle16ff_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16m2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m4_t test_vle16ff_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16m4_t test_vle16ff_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16m4_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat16m8_t test_vle16ff_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) {
+vfloat16m8_t test_vle16ff_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd,
+                                    const _Float16 *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_f16m8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16mf4_t test_vle16ff_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16mf4_t test_vle16ff_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16mf4_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16mf2_t test_vle16ff_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16mf2_t test_vle16ff_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16mf2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m1_t test_vle16ff_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16m1_t test_vle16ff_v_i16m1_mu(vbool16_t vm, vint16m1_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16m1_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m2_t test_vle16ff_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16m2_t test_vle16ff_v_i16m2_mu(vbool8_t vm, vint16m2_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16m2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m4_t test_vle16ff_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16m4_t test_vle16ff_v_i16m4_mu(vbool4_t vm, vint16m4_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16m4_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint16m8_t test_vle16ff_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) {
+vint16m8_t test_vle16ff_v_i16m8_mu(vbool2_t vm, vint16m8_t vd,
+                                    const int16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_i16m8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16mf4_t test_vle16ff_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16mf4_t test_vle16ff_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16mf4_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16mf2_t test_vle16ff_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16mf2_t test_vle16ff_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16mf2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m1_t test_vle16ff_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16m1_t test_vle16ff_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16m1_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m2_t test_vle16ff_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16m2_t test_vle16ff_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16m2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m4_t test_vle16ff_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16m4_t test_vle16ff_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16m4_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint16m8_t test_vle16ff_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) {
+vuint16m8_t test_vle16ff_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+                                    const uint16_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle16ff_v_u16m8_mu(vm, vd, rs1, new_vl, vl);
 }
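For context on the policy suffixes exercised in vle16ff.c above: _tum is tail-undisturbed/mask-agnostic, _tumu is tail-undisturbed/mask-undisturbed, and _mu is tail-agnostic/mask-undisturbed; in each case the extra vd operand supplies the element values that the chosen policy preserves. A minimal sketch, assuming only <riscv_vector.h>, the vle16ff intrinsics shown above, and the standard __riscv_vadd_vv_i16m1:

#include <riscv_vector.h>

// Hypothetical demo (not part of the generated tests): the three masked
// policy variants differ only in which elements keep vd's old values.
vint16m1_t policy_demo(vbool16_t vm, vint16m1_t vd, const int16_t *rs1,
                       size_t *new_vl, size_t vl) {
  // _tum: tail elements keep vd; inactive (masked-off) elements are agnostic.
  vint16m1_t a = __riscv_vle16ff_v_i16m1_tum(vm, vd, rs1, new_vl, vl);
  // _tumu: both tail and inactive elements keep vd.
  vint16m1_t b = __riscv_vle16ff_v_i16m1_tumu(vm, vd, rs1, new_vl, vl);
  // _mu: only inactive elements keep vd; the tail is agnostic.
  vint16m1_t c = __riscv_vle16ff_v_i16m1_mu(vm, vd, rs1, new_vl, vl);
  return __riscv_vadd_vv_i16m1(__riscv_vadd_vv_i16m1(a, b, vl), c, vl);
}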
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vle32.c b/auto-generated/policy_funcs/llvm-api-tests/vle32.c
index e35c73a94..f0ac63a3f 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vle32.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vle32.c
@@ -1,32 +1,38 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-vfloat32mf2_t test_vle32_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, size_t vl) {
+vfloat32mf2_t test_vle32_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_f32mf2_tu(vd, rs1, vl);
 }
 
-vfloat32m1_t test_vle32_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, size_t vl) {
+vfloat32m1_t test_vle32_v_f32m1_tu(vfloat32m1_t vd, const float *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_f32m1_tu(vd, rs1, vl);
 }
 
-vfloat32m2_t test_vle32_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, size_t vl) {
+vfloat32m2_t test_vle32_v_f32m2_tu(vfloat32m2_t vd, const float *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_f32m2_tu(vd, rs1, vl);
 }
 
-vfloat32m4_t test_vle32_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, size_t vl) {
+vfloat32m4_t test_vle32_v_f32m4_tu(vfloat32m4_t vd, const float *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_f32m4_tu(vd, rs1, vl);
 }
 
-vfloat32m8_t test_vle32_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, size_t vl) {
+vfloat32m8_t test_vle32_v_f32m8_tu(vfloat32m8_t vd, const float *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_f32m8_tu(vd, rs1, vl);
 }
 
-vint32mf2_t test_vle32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, size_t vl) {
+vint32mf2_t test_vle32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_i32mf2_tu(vd, rs1, vl);
 }
 
@@ -46,202 +52,252 @@ vint32m8_t test_vle32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32m8_tu(vd, rs1, vl);
 }
 
-vuint32mf2_t test_vle32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, size_t vl) {
+vuint32mf2_t test_vle32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_u32mf2_tu(vd, rs1, vl);
 }
 
-vuint32m1_t test_vle32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m1_t test_vle32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_u32m1_tu(vd, rs1, vl);
 }
 
-vuint32m2_t test_vle32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m2_t test_vle32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_u32m2_tu(vd, rs1, vl);
 }
 
-vuint32m4_t test_vle32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m4_t test_vle32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_u32m4_tu(vd, rs1, vl);
 }
 
-vuint32m8_t test_vle32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m8_t test_vle32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_u32m8_tu(vd, rs1, vl);
 }
 
-vfloat32mf2_t test_vle32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, size_t vl) {
+vfloat32mf2_t test_vle32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32mf2_tum(vm, vd, rs1, vl);
 }
 
-vfloat32m1_t test_vle32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float *rs1, size_t vl) {
+vfloat32m1_t test_vle32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32m1_tum(vm, vd, rs1, vl);
 }
 
-vfloat32m2_t test_vle32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float *rs1, size_t vl) {
+vfloat32m2_t test_vle32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32m2_tum(vm, vd, rs1, vl);
 }
 
-vfloat32m4_t test_vle32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float *rs1, size_t vl) {
+vfloat32m4_t test_vle32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32m4_tum(vm, vd, rs1, vl);
 }
 
-vfloat32m8_t test_vle32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float *rs1, size_t vl) {
+vfloat32m8_t test_vle32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32m8_tum(vm, vd, rs1, vl);
 }
 
-vint32mf2_t test_vle32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t vl) {
+vint32mf2_t test_vle32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32mf2_tum(vm, vd, rs1, vl);
 }
 
-vint32m1_t test_vle32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t vl) {
+vint32m1_t test_vle32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32m1_tum(vm, vd, rs1, vl);
 }
 
-vint32m2_t test_vle32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t vl) {
+vint32m2_t test_vle32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32m2_tum(vm, vd, rs1, vl);
 }
 
-vint32m4_t test_vle32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t vl) {
+vint32m4_t test_vle32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32m4_tum(vm, vd, rs1, vl);
 }
 
-vint32m8_t test_vle32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t vl) {
+vint32m8_t test_vle32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32m8_tum(vm, vd, rs1, vl);
 }
 
-vuint32mf2_t test_vle32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t vl) {
+vuint32mf2_t test_vle32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32mf2_tum(vm, vd, rs1, vl);
 }
 
-vuint32m1_t test_vle32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m1_t test_vle32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32m1_tum(vm, vd, rs1, vl);
 }
 
-vuint32m2_t test_vle32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m2_t test_vle32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32m2_tum(vm, vd, rs1, vl);
 }
 
-vuint32m4_t test_vle32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m4_t test_vle32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32m4_tum(vm, vd, rs1, vl);
 }
 
-vuint32m8_t test_vle32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m8_t test_vle32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32m8_tum(vm, vd, rs1, vl);
 }
 
-vfloat32mf2_t test_vle32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, size_t vl) {
+vfloat32mf2_t test_vle32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32mf2_tumu(vm, vd, rs1, vl);
 }
 
-vfloat32m1_t test_vle32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, size_t vl) {
+vfloat32m1_t test_vle32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32m1_tumu(vm, vd, rs1, vl);
 }
 
-vfloat32m2_t test_vle32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, size_t vl) {
+vfloat32m2_t test_vle32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32m2_tumu(vm, vd, rs1, vl);
 }
 
-vfloat32m4_t test_vle32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, size_t vl) {
+vfloat32m4_t test_vle32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32m4_tumu(vm, vd, rs1, vl);
 }
 
-vfloat32m8_t test_vle32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, size_t vl) {
+vfloat32m8_t test_vle32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32m8_tumu(vm, vd, rs1, vl);
 }
 
-vint32mf2_t test_vle32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t vl) {
+vint32mf2_t test_vle32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32mf2_tumu(vm, vd, rs1, vl);
 }
 
-vint32m1_t test_vle32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t vl) {
+vint32m1_t test_vle32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32m1_tumu(vm, vd, rs1, vl);
 }
 
-vint32m2_t test_vle32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t vl) {
+vint32m2_t test_vle32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32m2_tumu(vm, vd, rs1, vl);
 }
 
-vint32m4_t test_vle32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t vl) {
+vint32m4_t test_vle32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32m4_tumu(vm, vd, rs1, vl);
 }
 
-vint32m8_t test_vle32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t vl) {
+vint32m8_t test_vle32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32m8_tumu(vm, vd, rs1, vl);
 }
 
-vuint32mf2_t test_vle32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t vl) {
+vuint32mf2_t test_vle32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32mf2_tumu(vm, vd, rs1, vl);
 }
 
-vuint32m1_t test_vle32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m1_t test_vle32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32m1_tumu(vm, vd, rs1, vl);
 }
 
-vuint32m2_t test_vle32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m2_t test_vle32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32m2_tumu(vm, vd, rs1, vl);
 }
 
-vuint32m4_t test_vle32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m4_t test_vle32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32m4_tumu(vm, vd, rs1, vl);
 }
 
-vuint32m8_t test_vle32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m8_t test_vle32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32m8_tumu(vm, vd, rs1, vl);
 }
 
-vfloat32mf2_t test_vle32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, size_t vl) {
+vfloat32mf2_t test_vle32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32mf2_mu(vm, vd, rs1, vl);
 }
 
-vfloat32m1_t test_vle32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, size_t vl) {
+vfloat32m1_t test_vle32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32m1_mu(vm, vd, rs1, vl);
 }
 
-vfloat32m2_t test_vle32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, size_t vl) {
+vfloat32m2_t test_vle32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32m2_mu(vm, vd, rs1, vl);
 }
 
-vfloat32m4_t test_vle32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, size_t vl) {
+vfloat32m4_t test_vle32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32m4_mu(vm, vd, rs1, vl);
 }
 
-vfloat32m8_t test_vle32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, size_t vl) {
+vfloat32m8_t test_vle32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+                                    const float *rs1, size_t vl) {
   return __riscv_vle32_v_f32m8_mu(vm, vd, rs1, vl);
 }
 
-vint32mf2_t test_vle32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t vl) {
+vint32mf2_t test_vle32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32mf2_mu(vm, vd, rs1, vl);
 }
 
-vint32m1_t test_vle32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t vl) {
+vint32m1_t test_vle32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32m1_mu(vm, vd, rs1, vl);
 }
 
-vint32m2_t test_vle32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t vl) {
+vint32m2_t test_vle32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd,
+                                    const int32_t *rs1, size_t vl) {
   return __riscv_vle32_v_i32m2_mu(vm, vd, rs1, vl);
 }
 
-vint32m4_t test_vle32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t vl) {
+vint32m4_t test_vle32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_i32m4_mu(vm, vd, rs1, vl);
 }
 
-vint32m8_t test_vle32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t vl) {
+vint32m8_t test_vle32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1,
+                                    size_t vl) {
   return __riscv_vle32_v_i32m8_mu(vm, vd, rs1, vl);
 }
 
-vuint32mf2_t test_vle32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t vl) {
+vuint32mf2_t test_vle32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32mf2_mu(vm, vd, rs1, vl);
 }
 
-vuint32m1_t test_vle32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m1_t test_vle32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32m1_mu(vm, vd, rs1, vl);
 }
 
-vuint32m2_t test_vle32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m2_t test_vle32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32m2_mu(vm, vd, rs1, vl);
 }
 
-vuint32m4_t test_vle32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m4_t test_vle32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32m4_mu(vm, vd, rs1, vl);
 }
 
-vuint32m8_t test_vle32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t vl) {
+vuint32m8_t test_vle32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+                                    const uint32_t *rs1, size_t vl) {
   return __riscv_vle32_v_u32m8_mu(vm, vd, rs1, vl);
 }
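The vle32ff.c hunks that follow reformat the fault-only-first tests. A vleNNff load traps only if element 0 faults; if a later element would fault, the load is truncated and *new_vl reports how many elements were actually loaded. A minimal usage sketch, assuming the base (non-policy) intrinsics __riscv_vle32ff_v_i32m1, __riscv_vse32_v_i32m1, and __riscv_vsetvl_e32m1 from <riscv_vector.h>:

#include <riscv_vector.h>

// Hypothetical helper: copy n int32 elements, letting vle32ff stop short
// at a faulting address mid-vector instead of trapping.
void copy_ff(int32_t *dst, const int32_t *src, size_t n) {
  while (n > 0) {
    size_t vl = __riscv_vsetvl_e32m1(n);
    size_t new_vl;
    // Loads at most vl elements; new_vl <= vl is the count really loaded.
    vint32m1_t v = __riscv_vle32ff_v_i32m1(src, &new_vl, vl);
    __riscv_vse32_v_i32m1(dst, v, new_vl);
    src += new_vl;
    dst += new_vl;
    n -= new_vl;
  }
}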
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vle32ff.c b/auto-generated/policy_funcs/llvm-api-tests/vle32ff.c
index 0f94d01a0..56a12a3ad 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vle32ff.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vle32ff.c
@@ -1,247 +1,352 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-vfloat32mf2_t test_vle32ff_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32mf2_t test_vle32ff_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_f32mf2_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat32m1_t test_vle32ff_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m1_t test_vle32ff_v_f32m1_tu(vfloat32m1_t vd, const float *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_f32m1_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat32m2_t test_vle32ff_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m2_t test_vle32ff_v_f32m2_tu(vfloat32m2_t vd, const float *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_f32m2_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat32m4_t test_vle32ff_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m4_t test_vle32ff_v_f32m4_tu(vfloat32m4_t vd, const float *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_f32m4_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat32m8_t test_vle32ff_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m8_t test_vle32ff_v_f32m8_tu(vfloat32m8_t vd, const float *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_f32m8_tu(vd, rs1, new_vl, vl);
 }
 
-vint32mf2_t test_vle32ff_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32mf2_t test_vle32ff_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_i32mf2_tu(vd, rs1, new_vl, vl);
 }
 
-vint32m1_t test_vle32ff_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m1_t test_vle32ff_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_i32m1_tu(vd, rs1, new_vl, vl);
 }
 
-vint32m2_t test_vle32ff_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m2_t test_vle32ff_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_i32m2_tu(vd, rs1, new_vl, vl);
 }
 
-vint32m4_t test_vle32ff_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m4_t test_vle32ff_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_i32m4_tu(vd, rs1, new_vl, vl);
 }
 
-vint32m8_t test_vle32ff_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m8_t test_vle32ff_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_i32m8_tu(vd, rs1, new_vl, vl);
 }
 
-vuint32mf2_t test_vle32ff_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32mf2_t test_vle32ff_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_u32mf2_tu(vd, rs1, new_vl, vl);
 }
 
-vuint32m1_t test_vle32ff_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m1_t test_vle32ff_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_u32m1_tu(vd, rs1, new_vl, vl);
 }
 
-vuint32m2_t test_vle32ff_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m2_t test_vle32ff_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_u32m2_tu(vd, rs1, new_vl, vl);
 }
 
-vuint32m4_t test_vle32ff_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m4_t test_vle32ff_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_u32m4_tu(vd, rs1, new_vl, vl);
 }
 
-vuint32m8_t test_vle32ff_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m8_t test_vle32ff_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle32ff_v_u32m8_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat32mf2_t test_vle32ff_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32mf2_t test_vle32ff_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32mf2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m1_t test_vle32ff_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m1_t test_vle32ff_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32m1_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m2_t test_vle32ff_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m2_t test_vle32ff_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32m2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m4_t test_vle32ff_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m4_t test_vle32ff_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32m4_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m8_t test_vle32ff_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m8_t test_vle32ff_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32m8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint32mf2_t test_vle32ff_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32mf2_t test_vle32ff_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32mf2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m1_t test_vle32ff_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m1_t test_vle32ff_v_i32m1_tum(vbool32_t vm, vint32m1_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32m1_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m2_t test_vle32ff_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m2_t test_vle32ff_v_i32m2_tum(vbool16_t vm, vint32m2_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32m2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m4_t test_vle32ff_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m4_t test_vle32ff_v_i32m4_tum(vbool8_t vm, vint32m4_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32m4_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m8_t test_vle32ff_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m8_t test_vle32ff_v_i32m8_tum(vbool4_t vm, vint32m8_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32m8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32mf2_t test_vle32ff_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32mf2_t test_vle32ff_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32mf2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m1_t test_vle32ff_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m1_t test_vle32ff_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32m1_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m2_t test_vle32ff_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m2_t test_vle32ff_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32m2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m4_t test_vle32ff_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m4_t test_vle32ff_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32m4_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m8_t test_vle32ff_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m8_t test_vle32ff_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32m8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32mf2_t test_vle32ff_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32mf2_t test_vle32ff_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32mf2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m1_t test_vle32ff_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m1_t test_vle32ff_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32m1_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m2_t test_vle32ff_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m2_t test_vle32ff_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32m2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m4_t test_vle32ff_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m4_t test_vle32ff_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32m4_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m8_t test_vle32ff_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m8_t test_vle32ff_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32m8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32mf2_t test_vle32ff_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32mf2_t test_vle32ff_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32mf2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m1_t test_vle32ff_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m1_t test_vle32ff_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32m1_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m2_t test_vle32ff_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m2_t test_vle32ff_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32m2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m4_t test_vle32ff_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m4_t test_vle32ff_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32m4_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m8_t test_vle32ff_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m8_t test_vle32ff_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32m8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32mf2_t test_vle32ff_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32mf2_t test_vle32ff_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32mf2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m1_t test_vle32ff_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m1_t test_vle32ff_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32m1_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m2_t test_vle32ff_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m2_t test_vle32ff_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32m2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m4_t test_vle32ff_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m4_t test_vle32ff_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32m4_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m8_t test_vle32ff_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m8_t test_vle32ff_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32m8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32mf2_t test_vle32ff_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32mf2_t test_vle32ff_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32mf2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m1_t test_vle32ff_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m1_t test_vle32ff_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32m1_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m2_t test_vle32ff_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m2_t test_vle32ff_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32m2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m4_t test_vle32ff_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m4_t test_vle32ff_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32m4_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat32m8_t test_vle32ff_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, size_t *new_vl, size_t vl) {
+vfloat32m8_t test_vle32ff_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+                                    const float *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_f32m8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32mf2_t test_vle32ff_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32mf2_t test_vle32ff_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32mf2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m1_t test_vle32ff_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m1_t test_vle32ff_v_i32m1_mu(vbool32_t vm, vint32m1_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32m1_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m2_t test_vle32ff_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m2_t test_vle32ff_v_i32m2_mu(vbool16_t vm, vint32m2_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32m2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m4_t test_vle32ff_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m4_t test_vle32ff_v_i32m4_mu(vbool8_t vm, vint32m4_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32m4_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint32m8_t test_vle32ff_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) {
+vint32m8_t test_vle32ff_v_i32m8_mu(vbool4_t vm, vint32m8_t vd,
+                                    const int32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_i32m8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32mf2_t test_vle32ff_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32mf2_t test_vle32ff_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32mf2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m1_t test_vle32ff_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m1_t test_vle32ff_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32m1_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m2_t test_vle32ff_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m2_t test_vle32ff_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32m2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m4_t test_vle32ff_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m4_t test_vle32ff_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32m4_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint32m8_t test_vle32ff_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) {
+vuint32m8_t test_vle32ff_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+                                    const uint32_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle32ff_v_u32m8_mu(vm, vd, rs1, new_vl, vl);
 }
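Besides the clang-format rewrapping, note that every RUN line in these files drops the experimental- prefix: zvfh is a ratified extension and recent Clang accepts it directly as +zvfh. A minimal sketch of what the _Float16 tests then require, assuming the __riscv_vle16_v_f16m1_tu intrinsic from the same generated family:

#include <riscv_vector.h>

// Hypothetical demo: compiles with -march carrying v, zfh and zvfh on a
// recent Clang (no "experimental-zvfh"); _Float16 vector elements need zvfh.
vfloat16m1_t load_f16_tu(vfloat16m1_t vd, const _Float16 *rs1, size_t vl) {
  return __riscv_vle16_v_f16m1_tu(vd, rs1, vl);
}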
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vle64.c b/auto-generated/policy_funcs/llvm-api-tests/vle64.c
index c1afd72a4..ff66485b2 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vle64.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vle64.c
@@ -1,24 +1,28 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-vfloat64m1_t test_vle64_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, size_t vl) {
+vfloat64m1_t test_vle64_v_f64m1_tu(vfloat64m1_t vd, const double *rs1,
+                                    size_t vl) {
   return __riscv_vle64_v_f64m1_tu(vd, rs1, vl);
 }
 
-vfloat64m2_t test_vle64_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, size_t vl) {
+vfloat64m2_t test_vle64_v_f64m2_tu(vfloat64m2_t vd, const double *rs1,
+                                    size_t vl) {
   return __riscv_vle64_v_f64m2_tu(vd, rs1, vl);
 }
 
-vfloat64m4_t test_vle64_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, size_t vl) {
+vfloat64m4_t test_vle64_v_f64m4_tu(vfloat64m4_t vd, const double *rs1,
+                                    size_t vl) {
   return __riscv_vle64_v_f64m4_tu(vd, rs1, vl);
 }
 
-vfloat64m8_t test_vle64_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, size_t vl) {
+vfloat64m8_t test_vle64_v_f64m8_tu(vfloat64m8_t vd, const double *rs1,
+                                    size_t vl) {
   return __riscv_vle64_v_f64m8_tu(vd, rs1, vl);
 }
 
@@ -38,162 +42,202 @@ vint64m8_t test_vle64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, size_t vl) {
   return __riscv_vle64_v_i64m8_tu(vd, rs1, vl);
 }
 
-vuint64m1_t test_vle64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m1_t test_vle64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1,
+                                    size_t vl) {
   return __riscv_vle64_v_u64m1_tu(vd, rs1, vl);
 }
 
-vuint64m2_t test_vle64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m2_t test_vle64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1,
+                                    size_t vl) {
   return __riscv_vle64_v_u64m2_tu(vd, rs1, vl);
 }
 
-vuint64m4_t test_vle64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m4_t test_vle64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1,
+                                    size_t vl) {
   return __riscv_vle64_v_u64m4_tu(vd, rs1, vl);
 }
 
-vuint64m8_t test_vle64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m8_t test_vle64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1,
+                                    size_t vl) {
   return __riscv_vle64_v_u64m8_tu(vd, rs1, vl);
 }
 
-vfloat64m1_t test_vle64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const double *rs1, size_t vl) {
+vfloat64m1_t test_vle64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
+                                    const double *rs1, size_t vl) {
   return __riscv_vle64_v_f64m1_tum(vm, vd, rs1, vl);
 }
 
-vfloat64m2_t test_vle64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const double *rs1, size_t vl) {
+vfloat64m2_t test_vle64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd,
+                                    const double *rs1, size_t vl) {
   return __riscv_vle64_v_f64m2_tum(vm, vd, rs1, vl);
 }
 
-vfloat64m4_t test_vle64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const double *rs1, size_t vl) {
+vfloat64m4_t test_vle64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd,
+                                    const double *rs1, size_t vl) {
  return __riscv_vle64_v_f64m4_tum(vm, vd, rs1, vl);
 }
 
-vfloat64m8_t test_vle64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const double *rs1, size_t vl) {
+vfloat64m8_t test_vle64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd,
+                                    const double *rs1, size_t vl) {
   return __riscv_vle64_v_f64m8_tum(vm, vd, rs1, vl);
 }
 
-vint64m1_t test_vle64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t vl) {
+vint64m1_t test_vle64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd,
+                                    const int64_t *rs1, size_t vl) {
   return __riscv_vle64_v_i64m1_tum(vm, vd, rs1, vl);
 }
 
-vint64m2_t test_vle64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t vl) {
+vint64m2_t test_vle64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd,
+                                    const int64_t *rs1, size_t vl) {
   return __riscv_vle64_v_i64m2_tum(vm, vd, rs1, vl);
 }
 
-vint64m4_t test_vle64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t vl) {
+vint64m4_t test_vle64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd,
+                                    const int64_t *rs1, size_t vl) {
   return __riscv_vle64_v_i64m4_tum(vm, vd, rs1, vl);
 }
 
-vint64m8_t test_vle64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t vl) {
+vint64m8_t test_vle64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd,
+                                    const int64_t *rs1, size_t vl) {
   return __riscv_vle64_v_i64m8_tum(vm, vd, rs1, vl);
 }
 
-vuint64m1_t test_vle64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m1_t test_vle64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                    const uint64_t *rs1, size_t vl) {
   return __riscv_vle64_v_u64m1_tum(vm, vd, rs1, vl);
 }
 
-vuint64m2_t test_vle64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m2_t test_vle64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                    const uint64_t *rs1, size_t vl) {
   return __riscv_vle64_v_u64m2_tum(vm, vd, rs1, vl);
 }
 
-vuint64m4_t test_vle64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m4_t test_vle64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                    const uint64_t *rs1, size_t vl) {
   return __riscv_vle64_v_u64m4_tum(vm, vd, rs1, vl);
 }
 
-vuint64m8_t test_vle64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m8_t test_vle64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd,
+                                    const uint64_t *rs1, size_t vl) {
   return __riscv_vle64_v_u64m8_tum(vm, vd, rs1, vl);
 }
 
-vfloat64m1_t test_vle64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, size_t vl) {
+vfloat64m1_t test_vle64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd,
+                                    const double *rs1, size_t vl) {
   return __riscv_vle64_v_f64m1_tumu(vm, vd, rs1, vl);
 }
 
-vfloat64m2_t test_vle64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, size_t vl) {
+vfloat64m2_t test_vle64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd,
+                                    const double *rs1, size_t vl) {
   return __riscv_vle64_v_f64m2_tumu(vm, vd, rs1, vl);
 }
 
-vfloat64m4_t test_vle64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, size_t vl) {
+vfloat64m4_t test_vle64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd,
+                                    const double *rs1, size_t vl) {
   return __riscv_vle64_v_f64m4_tumu(vm, vd, rs1, vl);
 }
 
-vfloat64m8_t test_vle64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, size_t vl) {
+vfloat64m8_t test_vle64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd,
+                                    const double *rs1, size_t vl) {
   return __riscv_vle64_v_f64m8_tumu(vm, vd, rs1, vl);
 }
 
-vint64m1_t test_vle64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t vl) {
+vint64m1_t test_vle64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd,
+                                    const int64_t *rs1, size_t vl) {
   return __riscv_vle64_v_i64m1_tumu(vm, vd, rs1, vl);
 }
 
-vint64m2_t test_vle64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t vl) {
+vint64m2_t test_vle64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd,
+                                    const int64_t *rs1, size_t vl) {
   return __riscv_vle64_v_i64m2_tumu(vm, vd, rs1, vl);
 }
 
-vint64m4_t test_vle64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t vl) {
+vint64m4_t test_vle64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd,
+                                    const int64_t *rs1, size_t vl) {
   return __riscv_vle64_v_i64m4_tumu(vm, vd, rs1, vl);
 }
 
-vint64m8_t test_vle64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t vl) {
+vint64m8_t test_vle64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd,
+                                    const int64_t *rs1, size_t vl) {
   return __riscv_vle64_v_i64m8_tumu(vm, vd, rs1, vl);
 }
 
-vuint64m1_t test_vle64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m1_t test_vle64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                    const uint64_t *rs1, size_t vl) {
   return __riscv_vle64_v_u64m1_tumu(vm, vd, rs1, vl);
 }
 
-vuint64m2_t test_vle64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m2_t test_vle64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                    const uint64_t *rs1, size_t vl) {
   return __riscv_vle64_v_u64m2_tumu(vm, vd, rs1, vl);
 }
 
-vuint64m4_t test_vle64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m4_t test_vle64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                    const uint64_t *rs1, size_t vl) {
   return __riscv_vle64_v_u64m4_tumu(vm, vd, rs1, vl);
 }
 
-vuint64m8_t test_vle64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m8_t test_vle64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd,
+                                    const uint64_t *rs1, size_t vl) {
   return __riscv_vle64_v_u64m8_tumu(vm, vd, rs1, vl);
 }
 
-vfloat64m1_t test_vle64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, size_t vl) {
+vfloat64m1_t test_vle64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd,
+                                    const double *rs1, size_t vl) {
   return __riscv_vle64_v_f64m1_mu(vm, vd, rs1, vl);
 }
 
-vfloat64m2_t test_vle64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, size_t vl) {
+vfloat64m2_t test_vle64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd,
+                                    const double *rs1, size_t vl) {
   return __riscv_vle64_v_f64m2_mu(vm, vd, rs1, vl);
 }
 
-vfloat64m4_t test_vle64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, size_t vl) {
+vfloat64m4_t test_vle64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd,
+                                    const double *rs1, size_t vl) {
   return __riscv_vle64_v_f64m4_mu(vm, vd, rs1, vl);
 }
 
-vfloat64m8_t test_vle64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, size_t vl) {
+vfloat64m8_t test_vle64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd,
+                                    const double *rs1, size_t vl) {
   return __riscv_vle64_v_f64m8_mu(vm, vd, rs1, vl);
 }
 
-vint64m1_t test_vle64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t vl) {
+vint64m1_t test_vle64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd,
+                                    const int64_t *rs1, size_t vl) {
   return __riscv_vle64_v_i64m1_mu(vm, vd, rs1, vl);
 }
 
-vint64m2_t test_vle64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t vl) {
+vint64m2_t test_vle64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd,
+                                    const int64_t *rs1, size_t vl) {
   return __riscv_vle64_v_i64m2_mu(vm, vd, rs1, vl);
 }
 
-vint64m4_t test_vle64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t vl) {
+vint64m4_t test_vle64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd,
+                                    const int64_t *rs1, size_t vl) {
   return __riscv_vle64_v_i64m4_mu(vm, vd, rs1, vl);
 }
 
-vint64m8_t test_vle64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t vl) {
+vint64m8_t test_vle64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1,
+                                    size_t vl) {
   return __riscv_vle64_v_i64m8_mu(vm, vd, rs1, vl);
 }
 
-vuint64m1_t test_vle64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m1_t test_vle64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    const uint64_t *rs1, size_t vl) {
   return __riscv_vle64_v_u64m1_mu(vm, vd, rs1, vl);
 }
 
-vuint64m2_t test_vle64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m2_t test_vle64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    const uint64_t *rs1, size_t vl) {
   return __riscv_vle64_v_u64m2_mu(vm, vd, rs1, vl);
 }
 
-vuint64m4_t test_vle64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m4_t test_vle64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    const uint64_t *rs1, size_t vl) {
   return __riscv_vle64_v_u64m4_mu(vm, vd, rs1, vl);
 }
 
-vuint64m8_t test_vle64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t vl) {
+vuint64m8_t test_vle64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd,
+                                    const uint64_t *rs1, size_t vl) {
   return __riscv_vle64_v_u64m8_mu(vm, vd, rs1, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vle64ff.c b/auto-generated/policy_funcs/llvm-api-tests/vle64ff.c
index e67484826..94473b1b8 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vle64ff.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vle64ff.c
@@ -1,199 +1,283 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-vfloat64m1_t test_vle64ff_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m1_t test_vle64ff_v_f64m1_tu(vfloat64m1_t vd, const double *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle64ff_v_f64m1_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat64m2_t test_vle64ff_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m2_t test_vle64ff_v_f64m2_tu(vfloat64m2_t vd, const double *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle64ff_v_f64m2_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat64m4_t test_vle64ff_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m4_t test_vle64ff_v_f64m4_tu(vfloat64m4_t vd, const double *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle64ff_v_f64m4_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat64m8_t test_vle64ff_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m8_t test_vle64ff_v_f64m8_tu(vfloat64m8_t vd, const double *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle64ff_v_f64m8_tu(vd, rs1, new_vl, vl);
 }
 
-vint64m1_t test_vle64ff_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m1_t test_vle64ff_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle64ff_v_i64m1_tu(vd, rs1, new_vl, vl);
 }
 
-vint64m2_t test_vle64ff_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m2_t test_vle64ff_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle64ff_v_i64m2_tu(vd, rs1, new_vl, vl);
 }
 
-vint64m4_t test_vle64ff_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m4_t test_vle64ff_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle64ff_v_i64m4_tu(vd, rs1, new_vl, vl);
 }
 
-vint64m8_t test_vle64ff_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m8_t test_vle64ff_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle64ff_v_i64m8_tu(vd, rs1, new_vl, vl);
 }
 
-vuint64m1_t test_vle64ff_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m1_t test_vle64ff_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle64ff_v_u64m1_tu(vd, rs1, new_vl, vl);
 }
 
-vuint64m2_t test_vle64ff_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m2_t test_vle64ff_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle64ff_v_u64m2_tu(vd, rs1, new_vl, vl);
 }
 
-vuint64m4_t test_vle64ff_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m4_t test_vle64ff_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle64ff_v_u64m4_tu(vd, rs1, new_vl, vl);
 }
 
-vuint64m8_t test_vle64ff_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m8_t test_vle64ff_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1,
+                                    size_t *new_vl, size_t vl) {
   return __riscv_vle64ff_v_u64m8_tu(vd, rs1, new_vl, vl);
 }
 
-vfloat64m1_t test_vle64ff_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m1_t test_vle64ff_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
+                                    const double *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_f64m1_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m2_t test_vle64ff_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m2_t test_vle64ff_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd,
+                                    const double *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_f64m2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m4_t test_vle64ff_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m4_t test_vle64ff_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd,
+                                    const double *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_f64m4_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m8_t test_vle64ff_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m8_t test_vle64ff_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd,
+                                    const double *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_f64m8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m1_t test_vle64ff_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m1_t test_vle64ff_v_i64m1_tum(vbool64_t vm, vint64m1_t vd,
+                                    const int64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_i64m1_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m2_t test_vle64ff_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m2_t test_vle64ff_v_i64m2_tum(vbool32_t vm, vint64m2_t vd,
+                                    const int64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_i64m2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m4_t test_vle64ff_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m4_t test_vle64ff_v_i64m4_tum(vbool16_t vm, vint64m4_t vd,
+                                    const int64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_i64m4_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m8_t test_vle64ff_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m8_t test_vle64ff_v_i64m8_tum(vbool8_t vm, vint64m8_t vd,
+                                    const int64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_i64m8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m1_t test_vle64ff_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m1_t test_vle64ff_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                    const uint64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_u64m1_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m2_t test_vle64ff_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m2_t test_vle64ff_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                    const uint64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_u64m2_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m4_t test_vle64ff_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m4_t test_vle64ff_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                    const uint64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_u64m4_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m8_t test_vle64ff_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m8_t test_vle64ff_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd,
+                                    const uint64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_u64m8_tum(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m1_t test_vle64ff_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m1_t test_vle64ff_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd,
+                                    const double *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_f64m1_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m2_t test_vle64ff_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m2_t test_vle64ff_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd,
+                                    const double *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_f64m2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m4_t test_vle64ff_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m4_t test_vle64ff_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd,
+                                    const double *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_f64m4_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m8_t test_vle64ff_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m8_t test_vle64ff_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd,
+                                    const double *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_f64m8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m1_t test_vle64ff_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m1_t test_vle64ff_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd,
+                                    const int64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_i64m1_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m2_t test_vle64ff_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m2_t test_vle64ff_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd,
+                                    const int64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_i64m2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m4_t test_vle64ff_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m4_t test_vle64ff_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd,
+                                    const int64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_i64m4_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m8_t test_vle64ff_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m8_t test_vle64ff_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd,
+                                    const int64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_i64m8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m1_t test_vle64ff_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m1_t test_vle64ff_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                    const uint64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_u64m1_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m2_t test_vle64ff_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m2_t test_vle64ff_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                    const uint64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_u64m2_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m4_t test_vle64ff_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m4_t test_vle64ff_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                    const uint64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_u64m4_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m8_t test_vle64ff_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m8_t test_vle64ff_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd,
+                                    const uint64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_u64m8_tumu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m1_t test_vle64ff_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m1_t test_vle64ff_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd,
+                                    const double *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_f64m1_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m2_t test_vle64ff_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m2_t test_vle64ff_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd,
+                                    const double *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_f64m2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m4_t test_vle64ff_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m4_t test_vle64ff_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd,
+                                    const double *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_f64m4_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vfloat64m8_t test_vle64ff_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, size_t *new_vl, size_t vl) {
+vfloat64m8_t test_vle64ff_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd,
+                                    const double *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_f64m8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m1_t test_vle64ff_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m1_t test_vle64ff_v_i64m1_mu(vbool64_t vm, vint64m1_t vd,
+                                    const int64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_i64m1_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m2_t test_vle64ff_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m2_t test_vle64ff_v_i64m2_mu(vbool32_t vm, vint64m2_t vd,
+                                    const int64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_i64m2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m4_t test_vle64ff_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m4_t test_vle64ff_v_i64m4_mu(vbool16_t vm, vint64m4_t vd,
+                                    const int64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_i64m4_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vint64m8_t test_vle64ff_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) {
+vint64m8_t test_vle64ff_v_i64m8_mu(vbool8_t vm, vint64m8_t vd,
+                                    const int64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_i64m8_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m1_t test_vle64ff_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m1_t test_vle64ff_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    const uint64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_u64m1_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m2_t test_vle64ff_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m2_t test_vle64ff_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    const uint64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return __riscv_vle64ff_v_u64m2_mu(vm, vd, rs1, new_vl, vl);
 }
 
-vuint64m4_t test_vle64ff_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) {
+vuint64m4_t test_vle64ff_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    const uint64_t *rs1, size_t *new_vl,
+                                    size_t vl) {
   return
__riscv_vle64ff_v_u64m4_mu(vm, vd, rs1, new_vl, vl); } -vuint64m8_t test_vle64ff_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m8_t test_vle64ff_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle64ff_v_u64m8_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vle8.c b/auto-generated/policy_funcs/llvm-api-tests/vle8.c index 05e543f7e..a857f69ae 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vle8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vle8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -34,15 +34,18 @@ vint8m8_t test_vle8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, size_t vl) { return __riscv_vle8_v_i8m8_tu(vd, rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8_t test_vle8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8mf8_tu(vd, rs1, vl); } -vuint8mf4_t test_vle8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4_t test_vle8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8mf4_tu(vd, rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2_t test_vle8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8mf2_tu(vd, rs1, vl); } @@ -62,170 +65,212 @@ vuint8m8_t test_vle8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, size_t vl) { return __riscv_vle8_v_u8m8_tu(vd, rs1, vl); } -vint8mf8_t test_vle8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t vl) { +vint8mf8_t test_vle8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8mf8_tum(vm, vd, rs1, vl); } -vint8mf4_t test_vle8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t vl) { +vint8mf4_t test_vle8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8mf4_tum(vm, vd, rs1, vl); } -vint8mf2_t test_vle8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t vl) { +vint8mf2_t test_vle8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8mf2_tum(vm, vd, rs1, vl); } -vint8m1_t test_vle8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t vl) { +vint8m1_t test_vle8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8m1_tum(vm, vd, rs1, vl); } -vint8m2_t test_vle8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t vl) { +vint8m2_t test_vle8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8m2_tum(vm, vd, rs1, vl); } -vint8m4_t test_vle8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t vl) { +vint8m4_t test_vle8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8m4_tum(vm, vd, rs1, vl); } -vint8m8_t test_vle8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t vl) { +vint8m8_t test_vle8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + size_t vl) { return 
__riscv_vle8_v_i8m8_tum(vm, vd, rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8_t test_vle8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vle8_v_u8mf8_tum(vm, vd, rs1, vl); } -vuint8mf4_t test_vle8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4_t test_vle8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vle8_v_u8mf4_tum(vm, vd, rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2_t test_vle8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vle8_v_u8mf2_tum(vm, vd, rs1, vl); } -vuint8m1_t test_vle8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1_t test_vle8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8m1_tum(vm, vd, rs1, vl); } -vuint8m2_t test_vle8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2_t test_vle8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8m2_tum(vm, vd, rs1, vl); } -vuint8m4_t test_vle8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t vl) { +vuint8m4_t test_vle8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8m4_tum(vm, vd, rs1, vl); } -vuint8m8_t test_vle8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t vl) { +vuint8m8_t test_vle8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8m8_tum(vm, vd, rs1, vl); } -vint8mf8_t test_vle8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t vl) { +vint8mf8_t test_vle8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vle8_v_i8mf8_tumu(vm, vd, rs1, vl); } -vint8mf4_t test_vle8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t vl) { +vint8mf4_t test_vle8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vle8_v_i8mf4_tumu(vm, vd, rs1, vl); } -vint8mf2_t test_vle8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t vl) { +vint8mf2_t test_vle8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vle8_v_i8mf2_tumu(vm, vd, rs1, vl); } -vint8m1_t test_vle8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t vl) { +vint8m1_t test_vle8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8m1_tumu(vm, vd, rs1, vl); } -vint8m2_t test_vle8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t vl) { +vint8m2_t test_vle8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8m2_tumu(vm, vd, rs1, vl); } -vint8m4_t test_vle8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t vl) { +vint8m4_t test_vle8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8m4_tumu(vm, vd, rs1, vl); } -vint8m8_t test_vle8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t vl) { +vint8m8_t test_vle8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8m8_tumu(vm, vd, rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t vl) { 
+vuint8mf8_t test_vle8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vle8_v_u8mf8_tumu(vm, vd, rs1, vl); } -vuint8mf4_t test_vle8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4_t test_vle8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vle8_v_u8mf4_tumu(vm, vd, rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2_t test_vle8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vle8_v_u8mf2_tumu(vm, vd, rs1, vl); } -vuint8m1_t test_vle8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1_t test_vle8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8m1_tumu(vm, vd, rs1, vl); } -vuint8m2_t test_vle8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2_t test_vle8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8m2_tumu(vm, vd, rs1, vl); } -vuint8m4_t test_vle8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t vl) { +vuint8m4_t test_vle8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8m4_tumu(vm, vd, rs1, vl); } -vuint8m8_t test_vle8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t vl) { +vuint8m8_t test_vle8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8m8_tumu(vm, vd, rs1, vl); } -vint8mf8_t test_vle8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t vl) { +vint8mf8_t test_vle8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8mf8_mu(vm, vd, rs1, vl); } -vint8mf4_t test_vle8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t vl) { +vint8mf4_t test_vle8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8mf4_mu(vm, vd, rs1, vl); } -vint8mf2_t test_vle8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t vl) { +vint8mf2_t test_vle8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8mf2_mu(vm, vd, rs1, vl); } -vint8m1_t test_vle8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t vl) { +vint8m1_t test_vle8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8m1_mu(vm, vd, rs1, vl); } -vint8m2_t test_vle8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t vl) { +vint8m2_t test_vle8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8m2_mu(vm, vd, rs1, vl); } -vint8m4_t test_vle8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t vl) { +vint8m4_t test_vle8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8m4_mu(vm, vd, rs1, vl); } -vint8m8_t test_vle8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t vl) { +vint8m8_t test_vle8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vle8_v_i8m8_mu(vm, vd, rs1, vl); } -vuint8mf8_t test_vle8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8_t test_vle8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vle8_v_u8mf8_mu(vm, vd, rs1, vl); } -vuint8mf4_t 
test_vle8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4_t test_vle8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vle8_v_u8mf4_mu(vm, vd, rs1, vl); } -vuint8mf2_t test_vle8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2_t test_vle8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vle8_v_u8mf2_mu(vm, vd, rs1, vl); } -vuint8m1_t test_vle8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1_t test_vle8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8m1_mu(vm, vd, rs1, vl); } -vuint8m2_t test_vle8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2_t test_vle8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8m2_mu(vm, vd, rs1, vl); } -vuint8m4_t test_vle8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t vl) { +vuint8m4_t test_vle8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8m4_mu(vm, vd, rs1, vl); } -vuint8m8_t test_vle8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t vl) { +vuint8m8_t test_vle8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vle8_v_u8m8_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vle8ff.c b/auto-generated/policy_funcs/llvm-api-tests/vle8ff.c index 0458c1620..4f257b992 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vle8ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vle8ff.c @@ -1,231 +1,313 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8_t test_vle8ff_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8_t test_vle8ff_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8mf8_tu(vd, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4_t test_vle8ff_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8mf4_tu(vd, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2_t test_vle8ff_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8mf2_tu(vd, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1_t test_vle8ff_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_i8m1_tu(vd, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2_t test_vle8ff_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_i8m2_tu(vd, rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m4_t test_vle8ff_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, size_t *new_vl, + size_t vl) { return
__riscv_vle8ff_v_i8m4_tu(vd, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m8_t test_vle8ff_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_i8m8_tu(vd, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8_t test_vle8ff_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_u8mf8_tu(vd, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4_t test_vle8ff_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_u8mf4_tu(vd, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2_t test_vle8ff_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_u8mf2_tu(vd, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1_t test_vle8ff_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_u8m1_tu(vd, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2_t test_vle8ff_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_u8m2_tu(vd, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m4_t test_vle8ff_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_u8m4_tu(vd, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m8_t test_vle8ff_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_u8m8_tu(vd, rs1, new_vl, vl); } -vint8mf8_t test_vle8ff_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8_t test_vle8ff_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_i8mf8_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4_t test_vle8ff_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_i8mf4_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2_t test_vle8ff_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_i8mf2_tum(vm, vd, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1_t test_vle8ff_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8m1_tum(vm, vd, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2_t test_vle8ff_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8m2_tum(vm, vd, rs1, new_vl, vl); } -vint8m4_t 
test_vle8ff_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m4_t test_vle8ff_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8m4_tum(vm, vd, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m8_t test_vle8ff_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8m8_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8_t test_vle8ff_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8mf8_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4_t test_vle8ff_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8mf4_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2_t test_vle8ff_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8mf2_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1_t test_vle8ff_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8m1_tum(vm, vd, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2_t test_vle8ff_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8m2_tum(vm, vd, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m4_t test_vle8ff_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8m4_tum(vm, vd, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m8_t test_vle8ff_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8m8_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8_t test_vle8ff_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8_t test_vle8ff_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_i8mf8_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4_t test_vle8ff_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_i8mf4_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2_t test_vle8ff_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_i8mf2_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const 
int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1_t test_vle8ff_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8m1_tumu(vm, vd, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2_t test_vle8ff_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8m2_tumu(vm, vd, rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m4_t test_vle8ff_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8m4_tumu(vm, vd, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m8_t test_vle8ff_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8m8_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8_t test_vle8ff_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8mf8_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4_t test_vle8ff_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8mf4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2_t test_vle8ff_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8mf2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1_t test_vle8ff_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8m1_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2_t test_vle8ff_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8m2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m4_t test_vle8ff_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8m4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m8_t test_vle8ff_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8m8_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8_t test_vle8ff_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8_t test_vle8ff_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_i8mf8_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4_t test_vle8ff_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4_t 
test_vle8ff_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_i8mf4_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2_t test_vle8ff_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2_t test_vle8ff_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_i8mf2_mu(vm, vd, rs1, new_vl, vl); } -vint8m1_t test_vle8ff_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1_t test_vle8ff_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8m1_mu(vm, vd, rs1, new_vl, vl); } -vint8m2_t test_vle8ff_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2_t test_vle8ff_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8m2_mu(vm, vd, rs1, new_vl, vl); } -vint8m4_t test_vle8ff_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m4_t test_vle8ff_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8m4_mu(vm, vd, rs1, new_vl, vl); } -vint8m8_t test_vle8ff_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m8_t test_vle8ff_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_i8m8_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8_t test_vle8ff_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8_t test_vle8ff_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8mf8_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4_t test_vle8ff_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4_t test_vle8ff_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8mf4_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2_t test_vle8ff_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2_t test_vle8ff_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vle8ff_v_u8mf2_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1_t test_vle8ff_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1_t test_vle8ff_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_u8m1_mu(vm, vd, rs1, new_vl, vl); } -vuint8m2_t test_vle8ff_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2_t test_vle8ff_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_u8m2_mu(vm, vd, rs1, new_vl, vl); } -vuint8m4_t test_vle8ff_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m4_t test_vle8ff_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vle8ff_v_u8m4_mu(vm, vd, rs1, new_vl, vl); } -vuint8m8_t test_vle8ff_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m8_t test_vle8ff_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return 
__riscv_vle8ff_v_u8m8_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxei16.c b/auto-generated/policy_funcs/llvm-api-tests/vloxei16.c index b653e494d..f3c3b4406 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxei16.c @@ -1,919 +1,1312 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vloxei16_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei16_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxei16_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei16_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei16_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxei16_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei16_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1_t test_vloxei16_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxei16_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei16_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2_t test_vloxei16_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei16_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4_t test_vloxei16_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxei16_v_f16m4_tu(vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei16_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, vuint16m8_t rs2, size_t vl) { +vfloat16m8_t test_vloxei16_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vloxei16_v_f16m8_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei16_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei16_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxei16_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei16_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1_t test_vloxei16_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxei16_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei16_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2_t test_vloxei16_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxei16_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei16_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4_t test_vloxei16_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei16_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, vuint16m4_t rs2, size_t vl) { +vfloat32m8_t
test_vloxei16_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxei16_v_f32m8_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei16_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1_t test_vloxei16_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxei16_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei16_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2_t test_vloxei16_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxei16_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei16_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4_t test_vloxei16_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxei16_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei16_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, vuint16m2_t rs2, size_t vl) { +vfloat64m8_t test_vloxei16_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei16_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8_t test_vloxei16_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxei16_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei16_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4_t test_vloxei16_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxei16_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei16_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2_t test_vloxei16_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxei16_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vloxei16_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1_t test_vloxei16_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_i8m1_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vloxei16_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2_t test_vloxei16_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxei16_v_i8m2_tu(vd, rs1, rs2, vl); } -vint8m4_t test_vloxei16_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4_t test_vloxei16_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vloxei16_v_i8m4_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4_t test_vloxei16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxei16_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2_t test_vloxei16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxei16_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vloxei16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1_t test_vloxei16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxei16_v_i16m1_tu(vd, 
rs1, rs2, vl); } -vint16m2_t test_vloxei16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2_t test_vloxei16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_i16m2_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vloxei16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4_t test_vloxei16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxei16_v_i16m4_tu(vd, rs1, rs2, vl); } -vint16m8_t test_vloxei16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { +vint16m8_t test_vloxei16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vloxei16_v_i16m8_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei16_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2_t test_vloxei16_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxei16_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vloxei16_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1_t test_vloxei16_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxei16_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vloxei16_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2_t test_vloxei16_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxei16_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vloxei16_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4_t test_vloxei16_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_i32m4_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vloxei16_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { +vint32m8_t test_vloxei16_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxei16_v_i32m8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vloxei16_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1_t test_vloxei16_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxei16_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vloxei16_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2_t test_vloxei16_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxei16_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vloxei16_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4_t test_vloxei16_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxei16_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vloxei16_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { +vint64m8_t test_vloxei16_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei16_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8_t test_vloxei16_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxei16_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei16_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { 
+vuint8mf4_t test_vloxei16_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxei16_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei16_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2_t test_vloxei16_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxei16_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei16_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1_t test_vloxei16_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei16_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2_t test_vloxei16_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxei16_v_u8m2_tu(vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei16_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4_t test_vloxei16_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vloxei16_v_u8m4_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4_t test_vloxei16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxei16_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2_t test_vloxei16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxei16_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1_t test_vloxei16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxei16_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2_t test_vloxei16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4_t test_vloxei16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxei16_v_u16m4_tu(vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint16m8_t test_vloxei16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vloxei16_v_u16m8_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei16_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2_t test_vloxei16_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxei16_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei16_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1_t test_vloxei16_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxei16_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei16_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2_t test_vloxei16_v_u32m2_tu(vuint32m2_t vd, const uint32_t 
*rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxei16_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei16_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4_t test_vloxei16_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei16_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint32m8_t test_vloxei16_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxei16_v_u32m8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei16_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1_t test_vloxei16_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxei16_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei16_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2_t test_vloxei16_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxei16_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei16_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4_t test_vloxei16_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxei16_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei16_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint64m8_t test_vloxei16_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1_t test_vloxei16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2_t test_vloxei16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4_t test_vloxei16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16m4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, vuint16m8_t rs2, size_t vl) { +vfloat16m8_t test_vloxei16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16m8_tum(vm, vd, 
rs1, rs2, vl); } -vfloat32mf2_t test_vloxei16_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei16_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei16_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1_t test_vloxei16_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei16_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2_t test_vloxei16_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei16_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4_t test_vloxei16_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei16_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint16m4_t rs2, size_t vl) { +vfloat32m8_t test_vloxei16_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32m8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei16_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1_t test_vloxei16_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei16_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2_t test_vloxei16_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei16_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4_t test_vloxei16_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei16_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint16m2_t rs2, size_t vl) { +vfloat64m8_t test_vloxei16_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei16_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8_t test_vloxei16_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei16_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4_t test_vloxei16_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei16_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2_t test_vloxei16_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + const 
int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei16_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1_t test_vloxei16_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei16_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2_t test_vloxei16_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxei16_v_i8m2_tum(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vloxei16_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4_t test_vloxei16_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vloxei16_v_i8m4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4_t test_vloxei16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2_t test_vloxei16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1_t test_vloxei16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2_t test_vloxei16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4_t test_vloxei16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16m4_tum(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vloxei16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { +vint16m8_t test_vloxei16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16m8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei16_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2_t test_vloxei16_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei16_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1_t test_vloxei16_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei16_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2_t test_vloxei16_v_i32m2_tum(vbool16_t vm, 
vint32m2_t vd, + const int32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei16_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4_t test_vloxei16_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei16_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { +vint32m8_t test_vloxei16_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32m8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei16_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1_t test_vloxei16_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei16_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2_t test_vloxei16_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei16_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4_t test_vloxei16_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei16_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { +vint64m8_t test_vloxei16_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei16_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8_t test_vloxei16_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei16_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4_t test_vloxei16_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei16_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2_t test_vloxei16_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei16_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1_t test_vloxei16_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei16_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2_t test_vloxei16_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8m2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei16_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4_t 
test_vloxei16_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8m4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4_t test_vloxei16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2_t test_vloxei16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1_t test_vloxei16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2_t test_vloxei16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4_t test_vloxei16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16m4_tum(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint16m8_t test_vloxei16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16m8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei16_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2_t test_vloxei16_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei16_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1_t test_vloxei16_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei16_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2_t test_vloxei16_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei16_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4_t test_vloxei16_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei16_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint32m8_t test_vloxei16_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t 
test_vloxei16_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1_t test_vloxei16_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei16_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2_t test_vloxei16_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei16_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4_t test_vloxei16_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei16_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint64m8_t test_vloxei16_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1_t test_vloxei16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2_t test_vloxei16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4_t test_vloxei16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, vuint16m8_t rs2, size_t vl) { +vfloat16m8_t test_vloxei16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei16_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei16_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei16_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1_t 
test_vloxei16_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei16_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2_t test_vloxei16_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei16_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4_t test_vloxei16_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei16_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint16m4_t rs2, size_t vl) { +vfloat32m8_t test_vloxei16_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei16_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1_t test_vloxei16_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei16_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2_t test_vloxei16_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei16_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4_t test_vloxei16_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei16_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint16m2_t rs2, size_t vl) { +vfloat64m8_t test_vloxei16_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei16_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8_t test_vloxei16_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei16_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4_t test_vloxei16_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei16_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2_t test_vloxei16_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei16_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1_t test_vloxei16_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t 
test_vloxei16_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2_t test_vloxei16_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vloxei16_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4_t test_vloxei16_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + const int8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_i8m4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4_t test_vloxei16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2_t test_vloxei16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1_t test_vloxei16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2_t test_vloxei16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4_t test_vloxei16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vloxei16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { +vint16m8_t test_vloxei16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16m8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei16_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2_t test_vloxei16_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei16_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1_t test_vloxei16_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei16_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2_t test_vloxei16_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei16_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4_t test_vloxei16_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint16m2_t rs2, + size_t vl) { 
return __riscv_vloxei16_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei16_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { +vint32m8_t test_vloxei16_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei16_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1_t test_vloxei16_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei16_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2_t test_vloxei16_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei16_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4_t test_vloxei16_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei16_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { +vint64m8_t test_vloxei16_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei16_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8_t test_vloxei16_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei16_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4_t test_vloxei16_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei16_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2_t test_vloxei16_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei16_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1_t test_vloxei16_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei16_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2_t test_vloxei16_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei16_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4_t test_vloxei16_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4_t 
test_vloxei16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2_t test_vloxei16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1_t test_vloxei16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2_t test_vloxei16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4_t test_vloxei16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint16m8_t test_vloxei16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16m8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei16_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2_t test_vloxei16_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei16_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1_t test_vloxei16_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei16_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2_t test_vloxei16_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei16_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4_t test_vloxei16_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei16_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint32m8_t test_vloxei16_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei16_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1_t test_vloxei16_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint16mf4_t rs2, + size_t vl) { return 
__riscv_vloxei16_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei16_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2_t test_vloxei16_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei16_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4_t test_vloxei16_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei16_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint64m8_t test_vloxei16_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1_t test_vloxei16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2_t test_vloxei16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4_t test_vloxei16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, vuint16m8_t rs2, size_t vl) { +vfloat16m8_t test_vloxei16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_f16m8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei16_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei16_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei16_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1_t test_vloxei16_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei16_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) 
{ +vfloat32m2_t test_vloxei16_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei16_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4_t test_vloxei16_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei16_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint16m4_t rs2, size_t vl) { +vfloat32m8_t test_vloxei16_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f32m8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei16_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1_t test_vloxei16_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei16_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2_t test_vloxei16_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei16_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4_t test_vloxei16_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei16_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint16m2_t rs2, size_t vl) { +vfloat64m8_t test_vloxei16_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei16_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8_t test_vloxei16_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei16_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4_t test_vloxei16_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei16_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2_t test_vloxei16_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei16_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1_t test_vloxei16_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxei16_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei16_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2_t test_vloxei16_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxei16_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vloxei16_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { 
+vint8m4_t test_vloxei16_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vloxei16_v_i8m4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4_t test_vloxei16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2_t test_vloxei16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1_t test_vloxei16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2_t test_vloxei16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4_t test_vloxei16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16m4_mu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vloxei16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { +vint16m8_t test_vloxei16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_i16m8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei16_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2_t test_vloxei16_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei16_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1_t test_vloxei16_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei16_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2_t test_vloxei16_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei16_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4_t test_vloxei16_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei16_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { +vint32m8_t test_vloxei16_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei16_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, 
size_t vl) { +vint64m1_t test_vloxei16_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei16_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2_t test_vloxei16_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei16_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4_t test_vloxei16_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei16_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { +vint64m8_t test_vloxei16_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei16_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8_t test_vloxei16_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei16_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4_t test_vloxei16_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei16_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2_t test_vloxei16_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei16_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1_t test_vloxei16_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei16_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2_t test_vloxei16_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei16_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4_t test_vloxei16_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_u8m4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4_t test_vloxei16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2_t test_vloxei16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const 
uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1_t test_vloxei16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2_t test_vloxei16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4_t test_vloxei16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16m4_mu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint16m8_t test_vloxei16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxei16_v_u16m8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei16_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2_t test_vloxei16_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei16_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1_t test_vloxei16_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei16_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2_t test_vloxei16_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei16_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4_t test_vloxei16_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei16_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint32m8_t test_vloxei16_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u32m8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei16_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1_t test_vloxei16_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxei16_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei16_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2_t test_vloxei16_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei16_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4_t test_vloxei16_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxei16_v_u64m4_mu(vm, vd, rs1, rs2, vl); } 
-vuint64m8_t test_vloxei16_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint64m8_t test_vloxei16_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxei16_v_u64m8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxei32.c b/auto-generated/policy_funcs/llvm-api-tests/vloxei32.c index a5da2f9f7..3d38f6d41 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxei32.c @@ -1,839 +1,1199 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vloxei32_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei32_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxei32_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei32_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei32_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxei32_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei32_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1_t test_vloxei32_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxei32_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei32_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2_t test_vloxei32_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei32_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4_t test_vloxei32_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxei32_v_f16m4_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei32_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei32_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxei32_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei32_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1_t test_vloxei32_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxei32_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei32_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2_t test_vloxei32_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxei32_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei32_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4_t test_vloxei32_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei32_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, vuint32m8_t rs2, size_t vl) { +vfloat32m8_t test_vloxei32_v_f32m8_tu(vfloat32m8_t vd,
const float *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxei32_v_f32m8_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei32_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1_t test_vloxei32_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxei32_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei32_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2_t test_vloxei32_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxei32_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei32_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4_t test_vloxei32_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxei32_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei32_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, vuint32m4_t rs2, size_t vl) { +vfloat64m8_t test_vloxei32_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei32_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8_t test_vloxei32_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxei32_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei32_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4_t test_vloxei32_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxei32_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei32_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2_t test_vloxei32_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxei32_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vloxei32_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1_t test_vloxei32_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_i8m1_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vloxei32_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2_t test_vloxei32_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxei32_v_i8m2_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei32_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4_t test_vloxei32_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxei32_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei32_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2_t test_vloxei32_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxei32_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vloxei32_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1_t test_vloxei32_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxei32_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vloxei32_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2_t test_vloxei32_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_i16m2_tu(vd, rs1, rs2, vl); } -vint16m4_t 
test_vloxei32_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4_t test_vloxei32_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxei32_v_i16m4_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2_t test_vloxei32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxei32_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vloxei32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1_t test_vloxei32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxei32_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vloxei32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2_t test_vloxei32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxei32_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vloxei32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4_t test_vloxei32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_i32m4_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vloxei32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { +vint32m8_t test_vloxei32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxei32_v_i32m8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1_t test_vloxei32_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxei32_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2_t test_vloxei32_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxei32_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4_t test_vloxei32_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxei32_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { +vint64m8_t test_vloxei32_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8_t test_vloxei32_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxei32_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4_t test_vloxei32_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxei32_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2_t test_vloxei32_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxei32_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1_t 
test_vloxei32_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2_t test_vloxei32_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxei32_v_u8m2_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4_t test_vloxei32_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxei32_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2_t test_vloxei32_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxei32_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei32_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1_t test_vloxei32_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxei32_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei32_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2_t test_vloxei32_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei32_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4_t test_vloxei32_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxei32_v_u16m4_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2_t test_vloxei32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxei32_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1_t test_vloxei32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxei32_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2_t test_vloxei32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxei32_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4_t test_vloxei32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint32m8_t test_vloxei32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxei32_v_u32m8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei32_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1_t test_vloxei32_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxei32_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei32_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2_t test_vloxei32_v_u64m2_tu(vuint64m2_t vd, const uint64_t 
*rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxei32_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei32_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4_t test_vloxei32_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxei32_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei32_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint64m8_t test_vloxei32_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei32_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei32_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei32_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei32_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei32_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1_t test_vloxei32_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei32_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2_t test_vloxei32_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei32_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4_t test_vloxei32_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1_t test_vloxei32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2_t test_vloxei32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4_t test_vloxei32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint32m8_t rs2, size_t vl) { +vfloat32m8_t 
test_vloxei32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32m8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei32_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1_t test_vloxei32_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei32_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2_t test_vloxei32_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei32_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4_t test_vloxei32_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei32_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint32m4_t rs2, size_t vl) { +vfloat64m8_t test_vloxei32_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei32_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8_t test_vloxei32_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei32_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4_t test_vloxei32_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei32_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2_t test_vloxei32_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei32_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1_t test_vloxei32_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei32_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2_t test_vloxei32_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxei32_v_i8m2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei32_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4_t test_vloxei32_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei32_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2_t test_vloxei32_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei32_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, 
vuint32m2_t rs2, size_t vl) { +vint16m1_t test_vloxei32_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei32_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2_t test_vloxei32_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei32_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4_t test_vloxei32_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16m4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2_t test_vloxei32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1_t test_vloxei32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2_t test_vloxei32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4_t test_vloxei32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { +vint32m8_t test_vloxei32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32m8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1_t test_vloxei32_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2_t test_vloxei32_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4_t test_vloxei32_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { +vint64m8_t test_vloxei32_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8_tum(vbool64_t vm, 
vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8_t test_vloxei32_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4_t test_vloxei32_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2_t test_vloxei32_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1_t test_vloxei32_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2_t test_vloxei32_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8m2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4_t test_vloxei32_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2_t test_vloxei32_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei32_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1_t test_vloxei32_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei32_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2_t test_vloxei32_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei32_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4_t test_vloxei32_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16m4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2_t test_vloxei32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1_t test_vloxei32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint32m1_t rs2, + size_t vl) { return 
__riscv_vloxei32_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2_t test_vloxei32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4_t test_vloxei32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint32m8_t test_vloxei32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei32_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1_t test_vloxei32_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei32_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2_t test_vloxei32_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei32_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4_t test_vloxei32_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei32_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint64m8_t test_vloxei32_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei32_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei32_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei32_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei32_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei32_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1_t test_vloxei32_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei32_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2_t test_vloxei32_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei32_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint32m8_t 
rs2, size_t vl) { +vfloat16m4_t test_vloxei32_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1_t test_vloxei32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2_t test_vloxei32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4_t test_vloxei32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint32m8_t rs2, size_t vl) { +vfloat32m8_t test_vloxei32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei32_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1_t test_vloxei32_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei32_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2_t test_vloxei32_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei32_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4_t test_vloxei32_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei32_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint32m4_t rs2, size_t vl) { +vfloat64m8_t test_vloxei32_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei32_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8_t test_vloxei32_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei32_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4_t test_vloxei32_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_i8mf4_tumu(vm, 
vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei32_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2_t test_vloxei32_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei32_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1_t test_vloxei32_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei32_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2_t test_vloxei32_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei32_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4_t test_vloxei32_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei32_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2_t test_vloxei32_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei32_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1_t test_vloxei32_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei32_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2_t test_vloxei32_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei32_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4_t test_vloxei32_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2_t test_vloxei32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1_t test_vloxei32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2_t test_vloxei32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4_t test_vloxei32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, 
vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { +vint32m8_t test_vloxei32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1_t test_vloxei32_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2_t test_vloxei32_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4_t test_vloxei32_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { +vint64m8_t test_vloxei32_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8_t test_vloxei32_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4_t test_vloxei32_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2_t test_vloxei32_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1_t test_vloxei32_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2_t test_vloxei32_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4_t test_vloxei32_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) 
{ +vuint16mf2_t test_vloxei32_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei32_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1_t test_vloxei32_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei32_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2_t test_vloxei32_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei32_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4_t test_vloxei32_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2_t test_vloxei32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1_t test_vloxei32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2_t test_vloxei32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4_t test_vloxei32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint32m8_t test_vloxei32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei32_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1_t test_vloxei32_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei32_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2_t test_vloxei32_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei32_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4_t test_vloxei32_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint32m2_t rs2, + size_t vl) { return 
__riscv_vloxei32_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei32_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint64m8_t test_vloxei32_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei32_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei32_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei32_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei32_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei32_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1_t test_vloxei32_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei32_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2_t test_vloxei32_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei32_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4_t test_vloxei32_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1_t test_vloxei32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2_t test_vloxei32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4_t test_vloxei32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint32m8_t rs2, size_t vl) { +vfloat32m8_t test_vloxei32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_f32m8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei32_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1_t 
test_vloxei32_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei32_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2_t test_vloxei32_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei32_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4_t test_vloxei32_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei32_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint32m4_t rs2, size_t vl) { +vfloat64m8_t test_vloxei32_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei32_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8_t test_vloxei32_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei32_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4_t test_vloxei32_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei32_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2_t test_vloxei32_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei32_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1_t test_vloxei32_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxei32_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei32_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2_t test_vloxei32_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxei32_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei32_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4_t test_vloxei32_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei32_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2_t test_vloxei32_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei32_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1_t test_vloxei32_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei32_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2_t 
test_vloxei32_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei32_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4_t test_vloxei32_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_i16m4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2_t test_vloxei32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1_t test_vloxei32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2_t test_vloxei32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4_t test_vloxei32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { +vint32m8_t test_vloxei32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei32_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1_t test_vloxei32_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei32_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2_t test_vloxei32_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei32_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4_t test_vloxei32_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei32_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { +vint64m8_t test_vloxei32_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei32_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8_t test_vloxei32_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei32_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { 
+vuint8mf4_t test_vloxei32_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei32_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2_t test_vloxei32_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei32_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1_t test_vloxei32_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei32_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2_t test_vloxei32_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei32_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4_t test_vloxei32_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei32_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2_t test_vloxei32_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei32_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1_t test_vloxei32_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei32_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2_t test_vloxei32_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei32_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4_t test_vloxei32_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_u16m4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2_t test_vloxei32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1_t test_vloxei32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2_t test_vloxei32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei32_v_u32m4_mu(vbool8_t vm, 
vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4_t test_vloxei32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint32m8_t test_vloxei32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxei32_v_u32m8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei32_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1_t test_vloxei32_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei32_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2_t test_vloxei32_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxei32_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei32_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4_t test_vloxei32_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxei32_v_u64m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei32_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint64m8_t test_vloxei32_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxei32_v_u64m8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxei64.c b/auto-generated/policy_funcs/llvm-api-tests/vloxei64.c index 008d38156..3df3a9aca 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxei64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxei64.c @@ -1,711 +1,1017 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vloxei64_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei64_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxei64_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei64_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei64_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxei64_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei64_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1_t test_vloxei64_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxei64_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei64_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2_t test_vloxei64_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t
test_vloxei64_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei64_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxei64_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei64_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1_t test_vloxei64_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxei64_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei64_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2_t test_vloxei64_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxei64_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei64_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4_t test_vloxei64_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei64_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1_t test_vloxei64_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxei64_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei64_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2_t test_vloxei64_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxei64_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei64_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4_t test_vloxei64_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxei64_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei64_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, vuint64m8_t rs2, size_t vl) { +vfloat64m8_t test_vloxei64_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei64_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8_t test_vloxei64_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxei64_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei64_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4_t test_vloxei64_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxei64_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei64_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2_t test_vloxei64_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxei64_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vloxei64_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1_t test_vloxei64_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_i8m1_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei64_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4_t test_vloxei64_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxei64_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei64_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2_t 
test_vloxei64_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxei64_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vloxei64_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1_t test_vloxei64_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxei64_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vloxei64_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2_t test_vloxei64_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_i16m2_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei64_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2_t test_vloxei64_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxei64_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vloxei64_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1_t test_vloxei64_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxei64_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vloxei64_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2_t test_vloxei64_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxei64_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vloxei64_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4_t test_vloxei64_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_i32m4_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vloxei64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1_t test_vloxei64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxei64_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vloxei64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2_t test_vloxei64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxei64_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vloxei64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4_t test_vloxei64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxei64_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vloxei64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { +vint64m8_t test_vloxei64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei64_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8_t test_vloxei64_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxei64_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei64_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4_t test_vloxei64_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxei64_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei64_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2_t test_vloxei64_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return 
__riscv_vloxei64_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei64_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1_t test_vloxei64_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei64_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4_t test_vloxei64_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxei64_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei64_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2_t test_vloxei64_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxei64_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei64_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1_t test_vloxei64_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxei64_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei64_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2_t test_vloxei64_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei64_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2_t test_vloxei64_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxei64_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei64_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1_t test_vloxei64_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxei64_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei64_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2_t test_vloxei64_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxei64_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei64_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4_t test_vloxei64_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1_t test_vloxei64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxei64_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2_t test_vloxei64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxei64_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4_t test_vloxei64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxei64_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint64m8_t test_vloxei64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_u64m8_tu(vd, rs1, rs2, vl); } 
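The hunks above cover every type/LMUL combination of the tail-undisturbed (_tu) vloxei64 tests; the masked _tum, _tumu, and _mu variants follow. As a minimal usage sketch (not part of the generated patch): a caller passes the previous destination as vd, and vloxei64 treats rs2 as unsigned 64-bit byte offsets from rs1. The helper name gather_f64 below is hypothetical.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

// Minimal sketch (hypothetical helper, not from this patch): gather vl
// doubles from base[idx[i]] with the tail-undisturbed (_tu) policy, so
// elements past vl keep their values from dest.
vfloat64m1_t gather_f64(vfloat64m1_t dest, const double *base,
                        const uint64_t *idx, size_t vl) {
  vuint64m1_t offs = __riscv_vle64_v_u64m1(idx, vl);        // load indices
  offs = __riscv_vmul_vx_u64m1(offs, sizeof(double), vl);   // -> byte offsets
  return __riscv_vloxei64_v_f64m1_tu(dest, base, offs, vl); // indexed load
}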
-vfloat16mf4_t test_vloxei64_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei64_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei64_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei64_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei64_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1_t test_vloxei64_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei64_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2_t test_vloxei64_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei64_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei64_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei64_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1_t test_vloxei64_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei64_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2_t test_vloxei64_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei64_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4_t test_vloxei64_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1_t test_vloxei64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2_t test_vloxei64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4_t test_vloxei64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint64m8_t rs2, size_t vl) { +vfloat64m8_t test_vloxei64_v_f64m8_tum(vbool8_t vm, 
vfloat64m8_t vd, + const double *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei64_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8_t test_vloxei64_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei64_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4_t test_vloxei64_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei64_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2_t test_vloxei64_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei64_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1_t test_vloxei64_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei64_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4_t test_vloxei64_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei64_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2_t test_vloxei64_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei64_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1_t test_vloxei64_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei64_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2_t test_vloxei64_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei64_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2_t test_vloxei64_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei64_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1_t test_vloxei64_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei64_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2_t test_vloxei64_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei64_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4_t 
test_vloxei64_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1_t test_vloxei64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2_t test_vloxei64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4_t test_vloxei64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { +vint64m8_t test_vloxei64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei64_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8_t test_vloxei64_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei64_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4_t test_vloxei64_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei64_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2_t test_vloxei64_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei64_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1_t test_vloxei64_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei64_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4_t test_vloxei64_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei64_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2_t test_vloxei64_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei64_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1_t test_vloxei64_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei64_v_u16m2_tum(vbool8_t vm, 
vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2_t test_vloxei64_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei64_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2_t test_vloxei64_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei64_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1_t test_vloxei64_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei64_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2_t test_vloxei64_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei64_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4_t test_vloxei64_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1_t test_vloxei64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2_t test_vloxei64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4_t test_vloxei64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint64m8_t test_vloxei64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei64_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei64_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei64_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei64_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei64_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1_t test_vloxei64_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, 
vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei64_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2_t test_vloxei64_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei64_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei64_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei64_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1_t test_vloxei64_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei64_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2_t test_vloxei64_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei64_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4_t test_vloxei64_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1_t test_vloxei64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2_t test_vloxei64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4_t test_vloxei64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint64m8_t rs2, size_t vl) { +vfloat64m8_t test_vloxei64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei64_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8_t test_vloxei64_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei64_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4_t test_vloxei64_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei64_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, 
vuint64m4_t rs2, size_t vl) { +vint8mf2_t test_vloxei64_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei64_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1_t test_vloxei64_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei64_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4_t test_vloxei64_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei64_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2_t test_vloxei64_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei64_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1_t test_vloxei64_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei64_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2_t test_vloxei64_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei64_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2_t test_vloxei64_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei64_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1_t test_vloxei64_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei64_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2_t test_vloxei64_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei64_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4_t test_vloxei64_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1_t test_vloxei64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2_t test_vloxei64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t 
test_vloxei64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4_t test_vloxei64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { +vint64m8_t test_vloxei64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei64_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8_t test_vloxei64_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei64_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4_t test_vloxei64_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei64_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2_t test_vloxei64_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei64_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1_t test_vloxei64_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei64_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4_t test_vloxei64_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei64_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2_t test_vloxei64_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei64_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1_t test_vloxei64_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei64_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2_t test_vloxei64_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei64_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2_t test_vloxei64_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei64_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1_t test_vloxei64_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, 
+ const uint32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei64_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2_t test_vloxei64_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei64_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4_t test_vloxei64_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1_t test_vloxei64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2_t test_vloxei64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4_t test_vloxei64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint64m8_t test_vloxei64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei64_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei64_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei64_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei64_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei64_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1_t test_vloxei64_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei64_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2_t test_vloxei64_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei64_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei64_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei64_v_f32m1_mu(vbool32_t 
vm, vfloat32m1_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1_t test_vloxei64_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei64_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2_t test_vloxei64_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei64_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4_t test_vloxei64_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1_t test_vloxei64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2_t test_vloxei64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4_t test_vloxei64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint64m8_t rs2, size_t vl) { +vfloat64m8_t test_vloxei64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei64_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8_t test_vloxei64_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei64_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4_t test_vloxei64_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei64_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2_t test_vloxei64_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei64_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1_t test_vloxei64_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxei64_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei64_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4_t test_vloxei64_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t 
test_vloxei64_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2_t test_vloxei64_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei64_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1_t test_vloxei64_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei64_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2_t test_vloxei64_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei64_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2_t test_vloxei64_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei64_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1_t test_vloxei64_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei64_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2_t test_vloxei64_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei64_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4_t test_vloxei64_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1_t test_vloxei64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2_t test_vloxei64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4_t test_vloxei64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { +vint64m8_t test_vloxei64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei64_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8_t test_vloxei64_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } 
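The `_mu` (mask-undisturbed) variants above add a mask operand `vm`: active elements are loaded, masked-off elements keep their values from `vd`, and the tail is agnostic under this policy. A sketch under the same assumptions (hypothetical helper and buffers):

#include <riscv_vector.h>

// Masked ordered indexed gather: where vm[i] is set, load from
// base + byte_offsets[i]; where it is clear, keep prev[i] (mask undisturbed).
vuint8mf8_t gather_u8_mu(vbool64_t vm, vuint8mf8_t prev, const uint8_t *base,
                         const uint64_t *byte_offsets, size_t vl) {
  // EEW=64 indices for EEW=8 data: a u64m1 index vector pairs with u8mf8 data.
  vuint64m1_t idx = __riscv_vle64_v_u64m1(byte_offsets, vl);
  return __riscv_vloxei64_v_u8mf8_mu(vm, prev, base, idx, vl);
}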
-vuint8mf4_t test_vloxei64_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4_t test_vloxei64_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei64_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2_t test_vloxei64_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei64_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1_t test_vloxei64_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei64_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4_t test_vloxei64_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei64_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2_t test_vloxei64_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei64_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1_t test_vloxei64_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei64_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2_t test_vloxei64_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei64_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2_t test_vloxei64_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxei64_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei64_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1_t test_vloxei64_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei64_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2_t test_vloxei64_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei64_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4_t test_vloxei64_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1_t test_vloxei64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint64m1_t rs2, + 
size_t vl) { return __riscv_vloxei64_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2_t test_vloxei64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxei64_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4_t test_vloxei64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxei64_v_u64m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint64m8_t test_vloxei64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxei64_v_u64m8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxei8.c b/auto-generated/policy_funcs/llvm-api-tests/vloxei8.c index 1af6991d2..fc2dccc91 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxei8.c @@ -1,951 +1,1352 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vloxei8_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei8_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxei8_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei8_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei8_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxei8_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei8_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1_t test_vloxei8_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxei8_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei8_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2_t test_vloxei8_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei8_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4_t test_vloxei8_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxei8_v_f16m4_tu(vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei8_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, vuint8m4_t rs2, size_t vl) { +vfloat16m8_t test_vloxei8_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vloxei8_v_f16m8_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei8_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei8_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxei8_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t 
test_vloxei8_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1_t test_vloxei8_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxei8_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei8_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2_t test_vloxei8_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxei8_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei8_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4_t test_vloxei8_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei8_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, vuint8m2_t rs2, size_t vl) { +vfloat32m8_t test_vloxei8_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxei8_v_f32m8_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei8_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1_t test_vloxei8_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxei8_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei8_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2_t test_vloxei8_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxei8_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei8_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4_t test_vloxei8_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxei8_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei8_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, vuint8m1_t rs2, size_t vl) { +vfloat64m8_t test_vloxei8_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8_t test_vloxei8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxei8_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4_t test_vloxei8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxei8_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2_t test_vloxei8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxei8_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vloxei8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1_t test_vloxei8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m1_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vloxei8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2_t test_vloxei8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m2_tu(vd, rs1, rs2, vl); } -vint8m4_t test_vloxei8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4_t test_vloxei8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, + vuint8m4_t rs2, size_t vl) { 
return __riscv_vloxei8_v_i8m4_tu(vd, rs1, rs2, vl); } -vint8m8_t test_vloxei8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { +vint8m8_t test_vloxei8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, + vuint8m8_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m8_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei8_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4_t test_vloxei8_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxei8_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei8_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2_t test_vloxei8_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxei8_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vloxei8_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1_t test_vloxei8_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxei8_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vloxei8_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2_t test_vloxei8_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_i16m2_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vloxei8_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4_t test_vloxei8_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxei8_v_i16m4_tu(vd, rs1, rs2, vl); } -vint16m8_t test_vloxei8_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { +vint16m8_t test_vloxei8_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vloxei8_v_i16m8_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei8_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2_t test_vloxei8_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxei8_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vloxei8_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1_t test_vloxei8_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxei8_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vloxei8_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2_t test_vloxei8_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxei8_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vloxei8_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4_t test_vloxei8_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_i32m4_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vloxei8_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { +vint32m8_t test_vloxei8_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxei8_v_i32m8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vloxei8_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1_t test_vloxei8_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxei8_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vloxei8_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2_t 
test_vloxei8_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxei8_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vloxei8_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4_t test_vloxei8_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxei8_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vloxei8_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { +vint64m8_t test_vloxei8_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8_t test_vloxei8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxei8_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4_t test_vloxei8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxei8_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2_t test_vloxei8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxei8_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1_t test_vloxei8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2_t test_vloxei8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxei8_v_u8m2_tu(vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4_t test_vloxei8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vloxei8_v_u8m4_tu(vd, rs1, rs2, vl); } -vuint8m8_t test_vloxei8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { +vuint8m8_t test_vloxei8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, + vuint8m8_t rs2, size_t vl) { return __riscv_vloxei8_v_u8m8_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei8_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4_t test_vloxei8_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxei8_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei8_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2_t test_vloxei8_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxei8_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei8_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1_t test_vloxei8_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxei8_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei8_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2_t test_vloxei8_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint16m4_t 
test_vloxei8_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4_t test_vloxei8_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxei8_v_u16m4_tu(vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei8_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint16m8_t test_vloxei8_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vloxei8_v_u16m8_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei8_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2_t test_vloxei8_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxei8_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei8_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1_t test_vloxei8_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxei8_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei8_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2_t test_vloxei8_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxei8_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei8_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4_t test_vloxei8_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei8_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint32m8_t test_vloxei8_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxei8_v_u32m8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei8_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1_t test_vloxei8_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxei8_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei8_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2_t test_vloxei8_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxei8_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei8_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4_t test_vloxei8_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxei8_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei8_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint64m8_t test_vloxei8_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei8_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei8_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei8_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei8_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t 
test_vloxei8_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1_t test_vloxei8_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei8_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2_t test_vloxei8_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei8_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4_t test_vloxei8_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16m4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei8_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, vuint8m4_t rs2, size_t vl) { +vfloat16m8_t test_vloxei8_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16m8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei8_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei8_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei8_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1_t test_vloxei8_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei8_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2_t test_vloxei8_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei8_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4_t test_vloxei8_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei8_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint8m2_t rs2, size_t vl) { +vfloat32m8_t test_vloxei8_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32m8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei8_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1_t test_vloxei8_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei8_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2_t test_vloxei8_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei8_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4_t test_vloxei8_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint8mf2_t rs2, + size_t vl) { return 
__riscv_vloxei8_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei8_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint8m1_t rs2, size_t vl) { +vfloat64m8_t test_vloxei8_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8_t test_vloxei8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4_t test_vloxei8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2_t test_vloxei8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1_t test_vloxei8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2_t test_vloxei8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m2_tum(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vloxei8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4_t test_vloxei8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m4_tum(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vloxei8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { +vint8m8_t test_vloxei8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + vuint8m8_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei8_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4_t test_vloxei8_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei8_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2_t test_vloxei8_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei8_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1_t test_vloxei8_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei8_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2_t test_vloxei8_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16m2_tum(vm, vd, rs1, rs2, vl); } 
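For the 8-bit-indexed `_tum` variants here, the index vector is narrower than the data (EEW=8 byte offsets), and the policy is tail-undisturbed with a mask-agnostic body. A sketch with a hypothetical helper, again grounded in the signatures shown above:

#include <riscv_vector.h>

// Masked gather of int16 elements via 8-bit byte offsets. Tail elements past
// vl keep their values from vd; masked-off elements are mask-agnostic.
vint16m2_t gather_i16_tum(vbool8_t vm, vint16m2_t vd, const int16_t *base,
                          const uint8_t *byte_offsets, size_t vl) {
  // EEW=8 indices for EEW=16 data: a u8m1 index vector pairs with i16m2 data.
  vuint8m1_t idx = __riscv_vle8_v_u8m1(byte_offsets, vl);
  return __riscv_vloxei8_v_i16m2_tum(vm, vd, base, idx, vl);
}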
-vint16m4_t test_vloxei8_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4_t test_vloxei8_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16m4_tum(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vloxei8_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { +vint16m8_t test_vloxei8_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16m8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei8_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2_t test_vloxei8_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei8_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1_t test_vloxei8_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei8_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2_t test_vloxei8_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei8_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4_t test_vloxei8_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei8_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { +vint32m8_t test_vloxei8_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32m8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei8_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1_t test_vloxei8_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei8_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2_t test_vloxei8_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei8_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4_t test_vloxei8_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei8_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { +vint64m8_t test_vloxei8_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8_t test_vloxei8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } 
-vuint8mf4_t test_vloxei8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4_t test_vloxei8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2_t test_vloxei8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1_t test_vloxei8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2_t test_vloxei8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8m2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4_t test_vloxei8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8m4_tum(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vloxei8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { +vuint8m8_t test_vloxei8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, + const uint8_t *rs1, vuint8m8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8m8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4_t test_vloxei8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2_t test_vloxei8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1_t test_vloxei8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2_t test_vloxei8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4_t test_vloxei8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16m4_tum(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint16m8_t test_vloxei8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16m8_tum(vm, vd, 
rs1, rs2, vl); } -vuint32mf2_t test_vloxei8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2_t test_vloxei8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1_t test_vloxei8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2_t test_vloxei8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4_t test_vloxei8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint32m8_t test_vloxei8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1_t test_vloxei8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2_t test_vloxei8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4_t test_vloxei8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint64m8_t test_vloxei8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei8_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei8_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei8_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei8_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei8_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1_t test_vloxei8_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, 
+ const _Float16 *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei8_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2_t test_vloxei8_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei8_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4_t test_vloxei8_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei8_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, vuint8m4_t rs2, size_t vl) { +vfloat16m8_t test_vloxei8_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei8_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei8_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei8_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1_t test_vloxei8_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei8_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2_t test_vloxei8_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei8_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4_t test_vloxei8_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei8_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint8m2_t rs2, size_t vl) { +vfloat32m8_t test_vloxei8_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei8_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1_t test_vloxei8_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei8_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2_t test_vloxei8_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei8_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4_t test_vloxei8_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei8_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, 
vuint8m1_t rs2, size_t vl) { +vfloat64m8_t test_vloxei8_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8_t test_vloxei8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4_t test_vloxei8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2_t test_vloxei8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1_t test_vloxei8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2_t test_vloxei8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vloxei8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4_t test_vloxei8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m4_tumu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vloxei8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { +vint8m8_t test_vloxei8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + vuint8m8_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei8_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4_t test_vloxei8_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei8_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2_t test_vloxei8_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei8_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1_t test_vloxei8_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei8_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2_t test_vloxei8_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei8_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t 
vl) { +vint16m4_t test_vloxei8_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vloxei8_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { +vint16m8_t test_vloxei8_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16m8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei8_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2_t test_vloxei8_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei8_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1_t test_vloxei8_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei8_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2_t test_vloxei8_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei8_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4_t test_vloxei8_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei8_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { +vint32m8_t test_vloxei8_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei8_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1_t test_vloxei8_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei8_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2_t test_vloxei8_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei8_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4_t test_vloxei8_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei8_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { +vint64m8_t test_vloxei8_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8_t test_vloxei8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t 
*rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4_t test_vloxei8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vloxei8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2_t test_vloxei8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1_t test_vloxei8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2_t test_vloxei8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4_t test_vloxei8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8m4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vloxei8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { +vuint8m8_t test_vloxei8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + const uint8_t *rs1, vuint8m8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8m8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4_t test_vloxei8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2_t test_vloxei8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1_t test_vloxei8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2_t test_vloxei8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4_t test_vloxei8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint16m8_t test_vloxei8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16m8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t 
test_vloxei8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2_t test_vloxei8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vloxei8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1_t test_vloxei8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2_t test_vloxei8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4_t test_vloxei8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint32m8_t test_vloxei8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1_t test_vloxei8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2_t test_vloxei8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4_t test_vloxei8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint64m8_t test_vloxei8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vloxei8_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4_t test_vloxei8_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vloxei8_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2_t test_vloxei8_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vloxei8_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1_t test_vloxei8_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 
*rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vloxei8_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2_t test_vloxei8_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vloxei8_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4_t test_vloxei8_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vloxei8_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, vuint8m4_t rs2, size_t vl) { +vfloat16m8_t test_vloxei8_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxei8_v_f16m8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vloxei8_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2_t test_vloxei8_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vloxei8_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1_t test_vloxei8_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vloxei8_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2_t test_vloxei8_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vloxei8_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4_t test_vloxei8_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vloxei8_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint8m2_t rs2, size_t vl) { +vfloat32m8_t test_vloxei8_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f32m8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vloxei8_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1_t test_vloxei8_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vloxei8_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2_t test_vloxei8_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vloxei8_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4_t test_vloxei8_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vloxei8_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint8m1_t rs2, size_t vl) { +vfloat64m8_t test_vloxei8_v_f64m8_mu(vbool8_t vm, vfloat64m8_t 
vd, + const double *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vloxei8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8_t test_vloxei8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vloxei8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4_t test_vloxei8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vloxei8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2_t test_vloxei8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vloxei8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1_t test_vloxei8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vloxei8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2_t test_vloxei8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vloxei8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4_t test_vloxei8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m4_mu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vloxei8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { +vint8m8_t test_vloxei8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + vuint8m8_t rs2, size_t vl) { return __riscv_vloxei8_v_i8m8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vloxei8_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4_t test_vloxei8_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vloxei8_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2_t test_vloxei8_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vloxei8_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1_t test_vloxei8_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vloxei8_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2_t test_vloxei8_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vloxei8_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4_t test_vloxei8_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16m4_mu(vm, vd, 
rs1, rs2, vl); } -vint16m8_t test_vloxei8_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { +vint16m8_t test_vloxei8_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i16m8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vloxei8_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2_t test_vloxei8_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vloxei8_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1_t test_vloxei8_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vloxei8_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2_t test_vloxei8_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vloxei8_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4_t test_vloxei8_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vloxei8_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { +vint32m8_t test_vloxei8_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vloxei8_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1_t test_vloxei8_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vloxei8_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2_t test_vloxei8_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vloxei8_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4_t test_vloxei8_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vloxei8_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { +vint64m8_t test_vloxei8_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vloxei8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8_t test_vloxei8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vloxei8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4_t test_vloxei8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t 
test_vloxei8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2_t test_vloxei8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vloxei8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1_t test_vloxei8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vloxei8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2_t test_vloxei8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vloxei8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4_t test_vloxei8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8m4_mu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vloxei8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { +vuint8m8_t test_vloxei8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, + const uint8_t *rs1, vuint8m8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u8m8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vloxei8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4_t test_vloxei8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vloxei8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2_t test_vloxei8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vloxei8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1_t test_vloxei8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vloxei8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2_t test_vloxei8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vloxei8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4_t test_vloxei8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16m4_mu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vloxei8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint16m8_t test_vloxei8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u16m8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vloxei8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2_t test_vloxei8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t 
test_vloxei8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1_t test_vloxei8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vloxei8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2_t test_vloxei8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vloxei8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4_t test_vloxei8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vloxei8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint32m8_t test_vloxei8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u32m8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vloxei8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1_t test_vloxei8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxei8_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vloxei8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2_t test_vloxei8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxei8_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vloxei8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4_t test_vloxei8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxei8_v_u64m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vloxei8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint64m8_t test_vloxei8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxei8_v_u64m8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei16.c index 0ae0d2c29..5f7c02fb0 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei16.c @@ -1,775 +1,1153 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16mf4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, + const _Float16 *rs1, +
vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16mf2x2_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16m1x2_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16m2x2_tu(vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16m4x2_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f32mf2x2_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f32m1x2_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_f32m2x2_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t vd, + const float *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_f32m4x2_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f64m1x2_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f64m2x2_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t vd, + const double *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f64m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i8mf8x2_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, 
+ vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i8mf4x2_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i8m2x2_tu(vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i8m4x2_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16mf4x2_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16mf2x2_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16m1x2_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16m2x2_tu(vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16m4x2_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32m2x2_tu(vd, rs1, rs2, vl); 
} -vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i64m1x2_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i64m2x2_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i64m4x2_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8m2x2_tu(vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8m4x2_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16mf4x2_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16mf2x2_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t 
test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16m1x2_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16m2x2_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t vd, + const uint16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16m4x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32m1x2_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t vd, + const uint32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t vd, + const uint64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tum(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tum(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + 
vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tum(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { 
+vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8m2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8m4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16m2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16m4x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t 
vl) { +vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32m1x2_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const 
uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_u8m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_u8m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32m4x2_tum(vm, vd, rs1, rs2, 
vl); } -vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_tumu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_tumu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_tumu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x2_t 
test_vloxseg2ei16_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t 
*rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t 
test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_tumu(vbool64_t vm, + vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_tumu(vbool32_t vm, + vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint16m2_t 
rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_tumu(vbool64_t vm, + vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei16_v_f16mf4x2_mu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei16_v_f16mf2x2_mu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 
*rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4x2_t test_vloxseg2ei16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f16m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei16_v_f32mf2x2_mu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei16_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei16_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei16_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei16_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei16_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei16_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei16_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t 
test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei16_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei16_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei16_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x2_t test_vloxseg2ei16_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8m2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4x2_t test_vloxseg2ei16_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i8m4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4x2_t test_vloxseg2ei16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i16m4x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei16_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t 
test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei16_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei16_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei16_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei16_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei16_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei16_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei16_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei16_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei16_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei16_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x2_t test_vloxseg2ei16_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_u8m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t 
test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4x2_t test_vloxseg2ei16_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei16_v_u8m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4x2_t test_vloxseg2ei16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u16m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei16_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei16_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei16_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei16_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei16_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return 
__riscv_vloxseg2ei16_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei16_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei16_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg2ei16_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei32.c index d0ba2b94f..8979a6d96 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei32.c @@ -1,743 +1,1107 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16mf4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16mf2x2_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16m1x2_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16m2x2_tu(vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16m4x2_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f32mf2x2_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_f32m1x2_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t vd,
const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_f32m2x2_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t vd, + const float *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_f32m4x2_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f64m1x2_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f64m2x2_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t vd, + const double *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f64m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i8mf8x2_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i8mf4x2_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i8m2x2_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16mf4x2_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16mf2x2_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x2_t 
test_vloxseg2ei32_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16m1x2_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16m2x2_tu(vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16m4x2_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32m2x2_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i64m1x2_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i64m2x2_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i64m4x2_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t vd, + const uint8_t 
*rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8m2x2_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16mf4x2_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16mf2x2_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16m1x2_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16m2x2_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t vd, + const uint16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16m4x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32m1x2_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t vd, + const uint32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t vd, + const 
uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t vd, + const uint64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tum(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tum(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tum(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4x2_t 
test_vloxseg2ei32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i8m2x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t 
rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16m2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16m4x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32m1x2_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t 
vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_u8m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32m1x2_tum(vm, vd, rs1, rs2, 
vl); } -vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_tumu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_tumu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x2_t 
test_vloxseg2ei32_v_f32mf2x2_tumu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t vm, 
vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i64m1x2_tumu(vm, vd, rs1, rs2, 
vl); } -vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_tumu(vbool64_t vm, + vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_tumu(vbool32_t vm, + vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t 
*rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_tumu(vbool64_t vm, + vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei32_v_f16mf4x2_mu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei32_v_f16mf2x2_mu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, 
const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei32_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei32_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4x2_t test_vloxseg2ei32_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f16m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei32_v_f32mf2x2_mu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei32_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei32_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei32_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei32_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t 
test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei32_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei32_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei32_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x2_t test_vloxseg2ei32_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i8m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei32_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei32_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei32_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei32_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4x2_t test_vloxseg2ei32_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i16m4x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t 
test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei32_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei32_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei32_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei32_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei32_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei32_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei32_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x2_t test_vloxseg2ei32_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei32_v_u8m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei32_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } 
-vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei32_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei32_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei32_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4x2_t test_vloxseg2ei32_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u16m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei32_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei32_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg2ei32_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei32_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint32m2_t rs2, size_t vl) { 
   return __riscv_vloxseg2ei32_v_u64m4x2_mu(vm, vd, rs1, rs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei64.c
index 960204bfc..b21767296 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei64.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei64.c
@@ -1,663 +1,990 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) {
+vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t vd,
+                                                const _Float16 *rs1,
+                                                vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg2ei64_v_f16mf4x2_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) {
+vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t vd,
+                                                const _Float16 *rs1,
+                                                vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg2ei64_v_f16mf2x2_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) {
+vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t vd,
+                                              const _Float16 *rs1,
+                                              vuint64m4_t rs2, size_t vl) {
   return __riscv_vloxseg2ei64_v_f16m1x2_tu(vd, rs1, rs2, vl);
 }

-vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) {
+vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t vd,
+                                              const _Float16 *rs1,
+                                              vuint64m8_t rs2, size_t vl) {
   return __riscv_vloxseg2ei64_v_f16m2x2_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) {
+vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t vd,
+                                                const float *rs1,
+                                                vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg2ei64_v_f32mf2x2_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) {
+vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t vd,
+                                              const float *rs1, vuint64m2_t rs2,
+                                              size_t vl) {
   return __riscv_vloxseg2ei64_v_f32m1x2_tu(vd, rs1, rs2, vl);
 }

-vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) {
+vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t vd,
+                                              const float *rs1, vuint64m4_t rs2,
+                                              size_t vl) {
   return __riscv_vloxseg2ei64_v_f32m2x2_tu(vd, rs1, rs2, vl);
 }

-vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) {
+vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t vd,
+                                              const float *rs1, vuint64m8_t rs2,
+                                              size_t vl) {
   return __riscv_vloxseg2ei64_v_f32m4x2_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) {
+vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t vd,
+                                              const double *rs1,
+                                              vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg2ei64_v_f64m1x2_tu(vd, rs1, rs2, vl);
 }

-vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) {
+vfloat64m2x2_t
test_vloxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f64m2x2_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t vd, + const double *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f64m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i8mf8x2_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i8mf4x2_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16mf4x2_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16mf2x2_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16m1x2_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16m2x2_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, + 
vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32m2x2_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i64m1x2_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i64m2x2_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i64m4x2_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16mf4x2_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16mf2x2_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16m1x2_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return 
__riscv_vloxseg2ei64_v_u16m2x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32m1x2_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t vd, + const uint32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t vd, + const uint64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tum(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tum(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f16m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f16m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t vm, 
vfloat32mf2x2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tum(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t 
test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32m1x2_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint64m4_t rs2, size_t vl) { return 
__riscv_vloxseg2ei64_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x2_t 
test_vloxseg2ei64_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_tumu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_tumu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_tumu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } 
-vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t 
vl) { return __riscv_vloxseg2ei64_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_tumu(vbool32_t vm, 
vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_tumu(vbool64_t vm, + vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_tumu(vbool32_t vm, + vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_tumu(vbool64_t vm, + vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t vm, 
vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei64_v_f16mf4x2_mu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei64_v_f16mf2x2_mu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei64_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei64_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei64_v_f32mf2x2_mu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei64_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei64_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei64_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, vuint64m8_t rs2, + size_t vl) { return 
__riscv_vloxseg2ei64_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei64_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei64_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei64_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei64_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei64_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei64_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei64_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei64_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, vuint64m8_t rs2, + size_t vl) { return 
__riscv_vloxseg2ei64_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei64_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei64_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei64_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei64_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei64_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei64_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei64_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei64_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei64_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return 
__riscv_vloxseg2ei64_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei64_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei64_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei64_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei64_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei64_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei64_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei64_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei64_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei64_v_u64m4x2_mu(vbool16_t vm, 
vuint64m4x2_t vd, + const uint64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg2ei64_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei8.c index c1924658b..9fec44f14 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg2ei8.c @@ -1,775 +1,1147 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16mf4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16mf2x2_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16m1x2_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16m2x2_tu(vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16m4x2_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f32mf2x2_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f32m1x2_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f32m2x2_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f32m4x2_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t vd, const double *rs1, vuint8mf8_t
rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f64m1x2_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f64m2x2_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t vd, + const double *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f64m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i8mf8x2_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i8mf4x2_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i8m2x2_tu(vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i8m4x2_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i16mf4x2_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i16mf2x2_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i16m1x2_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, + vuint8m1_t rs2, size_t vl) { 
return __riscv_vloxseg2ei8_v_i16m2x2_tu(vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i16m4x2_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i32m2x2_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i64m1x2_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i64m2x2_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i64m4x2_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t 
vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u8m2x2_tu(vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u8m4x2_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16mf4x2_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16mf2x2_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16m1x2_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u16m2x2_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t vd, + const uint16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u16m4x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32m1x2_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t vd, + const uint32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, 
size_t vl) { +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t vd, + const uint64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tum(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tum(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tum(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const 
double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8m2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8m4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t 
rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i16m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i16m2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i16m4x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i32m1x2_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t 
vl) { +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, 
vuint8mf4_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_tumu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_tumu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl); } 
-vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_tumu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return 
__riscv_vloxseg2ei8_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, vuint8m1_t rs2, + size_t vl) { 
return __riscv_vloxseg2ei8_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t 
*rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { 
+vfloat16mf4x2_t test_vloxseg2ei8_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x2_t test_vloxseg2ei8_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x2_t test_vloxseg2ei8_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x2_t test_vloxseg2ei8_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4x2_t test_vloxseg2ei8_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f16m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x2_t test_vloxseg2ei8_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x2_t test_vloxseg2ei8_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x2_t test_vloxseg2ei8_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4x2_t test_vloxseg2ei8_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x2_t test_vloxseg2ei8_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x2_t test_vloxseg2ei8_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const double 
*rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4x2_t test_vloxseg2ei8_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x2_t test_vloxseg2ei8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x2_t test_vloxseg2ei8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x2_t test_vloxseg2ei8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x2_t test_vloxseg2ei8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x2_t test_vloxseg2ei8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8m2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4x2_t test_vloxseg2ei8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i8m4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x2_t test_vloxseg2ei8_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x2_t test_vloxseg2ei8_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x2_t test_vloxseg2ei8_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x2_t test_vloxseg2ei8_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4x2_t 
test_vloxseg2ei8_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i16m4x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x2_t test_vloxseg2ei8_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x2_t test_vloxseg2ei8_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x2_t test_vloxseg2ei8_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4x2_t test_vloxseg2ei8_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x2_t test_vloxseg2ei8_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x2_t test_vloxseg2ei8_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4x2_t test_vloxseg2ei8_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x2_t test_vloxseg2ei8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x2_t test_vloxseg2ei8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x2_t test_vloxseg2ei8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x2_t 
test_vloxseg2ei8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x2_t test_vloxseg2ei8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4x2_t test_vloxseg2ei8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u8m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x2_t test_vloxseg2ei8_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x2_t test_vloxseg2ei8_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x2_t test_vloxseg2ei8_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x2_t test_vloxseg2ei8_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4x2_t test_vloxseg2ei8_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u16m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x2_t test_vloxseg2ei8_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x2_t test_vloxseg2ei8_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x2_t test_vloxseg2ei8_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4x2_t 
test_vloxseg2ei8_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg2ei8_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x2_t test_vloxseg2ei8_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x2_t test_vloxseg2ei8_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4x2_t test_vloxseg2ei8_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg2ei8_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei16.c index 737129ae5..282893561 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei16.c @@ -1,599 +1,894 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16mf4x3_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16mf2x3_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16m1x3_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t vd, + const float *rs1, + vuint16mf2_t rs2, 
size_t vl) { return __riscv_vloxseg3ei16_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f64m1x3_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f64m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i8mf8x3_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i8mf4x3_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i8mf2x3_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i8m1x3_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i8m2x3_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16mf4x3_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16m1x3_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16m2x3_tu(vd, rs1, rs2, 
vl); } -vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i64m1x3_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i64m2x3_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8mf8x3_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8mf4x3_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8mf2x3_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8m1x3_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8m2x3_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16mf4x3_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16mf2x3_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t 
test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16m1x3_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16m2x3_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u32mf2x3_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u32m1x3_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u32m2x3_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u64m1x3_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u64m2x3_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tum(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tum(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint16mf4_t rs2, 
size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tum(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f32m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f32m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f64m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f64m2x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i8m1x3_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i8m2x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t vm, 
vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16m2x3_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i32m1x3_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i32m2x3_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i64m1x3_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i64m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } 
-vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_u8m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u32m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u32m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u64m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + 
vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u64m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_tumu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_tumu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_tumu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t 
vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t 
test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_tumu(vbool64_t vm, + vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_tumu(vbool32_t vm, + vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + 
vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_tumu(vbool64_t vm, + vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei16_v_f16mf4x3_mu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei16_v_f16mf2x3_mu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f16m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei16_v_f32mf2x3_mu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, 
const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei16_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f32m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei16_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_f32m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei16_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f64m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei16_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_f64m2x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei16_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei16_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei16_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei16_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i8m1x3_mu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x3_t test_vloxseg3ei16_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i8m2x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t 
vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i16m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i16m2x3_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei16_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei16_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i32m1x3_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei16_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_i32m2x3_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei16_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i64m1x3_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei16_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_i64m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei16_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei16_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei16_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei16_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_u8m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t vm, 
vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x3_t test_vloxseg3ei16_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei16_v_u8m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u16m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei16_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei16_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u32m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei16_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u32m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei16_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u64m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei16_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei16_v_u64m2x3_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei32.c index 9d4cf8db8..9d04fdf23 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei32.c @@ -1,599 +1,894 
@@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16mf4x3_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16mf2x3_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16m1x3_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f64m1x3_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f64m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i8mf8x3_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t
vl) { return __riscv_vloxseg3ei32_v_i8mf4x3_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i8mf2x3_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i8m1x3_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i8m2x3_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16mf4x3_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16m1x3_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16m2x3_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i64m1x3_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i64m2x3_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t 
test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8mf8x3_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8mf4x3_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8mf2x3_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8m1x3_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8m2x3_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16mf4x3_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16mf2x3_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16m1x3_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16m2x3_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u32mf2x3_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u32m1x3_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u32m2x3_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t 
test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u64m1x3_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u64m2x3_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tum(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tum(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tum(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f32m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f32m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f64m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return 
__riscv_vloxseg3ei32_v_f64m2x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i8m1x3_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i8m2x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16m2x3_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint32m1_t rs2, 
size_t vl) { return __riscv_vloxseg3ei32_v_i32m1x3_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i32m2x3_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i64m1x3_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i64m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_u8m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tum(vbool16_t vm, 
vuint16m1x3_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u32m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u32m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u64m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u64m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_tumu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_tumu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t 
test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_tumu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return 
__riscv_vloxseg3ei32_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x3_t 
test_vloxseg3ei32_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_tumu(vbool64_t vm, + vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_tumu(vbool32_t vm, + vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_tumu(vbool64_t vm, + vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t 
test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei32_v_f16mf4x3_mu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei32_v_f16mf2x3_mu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei32_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei32_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f16m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei32_v_f32mf2x3_mu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_f32m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_f32m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei32_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f64m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei32_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_f64m2x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei32_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + 
size_t vl) { return __riscv_vloxseg3ei32_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei32_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei32_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei32_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i8m1x3_mu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x3_t test_vloxseg3ei32_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i8m2x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei32_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei32_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei32_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i16m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei32_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i16m2x3_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i32m1x3_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, vuint32m2_t rs2, + size_t vl) { 
return __riscv_vloxseg3ei32_v_i32m2x3_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei32_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_i64m1x3_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei32_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_i64m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei32_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei32_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei32_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei32_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_u8m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x3_t test_vloxseg3ei32_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg3ei32_v_u8m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei32_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei32_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei32_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg3ei32_v_u16m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei32_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, 
+                                             vuint32m4_t rs2, size_t vl) {
   return __riscv_vloxseg3ei32_v_u16m2x3_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+vuint32mf2x3_t test_vloxseg3ei32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd,
+                                               const uint32_t *rs1,
+                                               vuint32mf2_t rs2, size_t vl) {
   return __riscv_vloxseg3ei32_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+vuint32m1x3_t test_vloxseg3ei32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd,
+                                             const uint32_t *rs1,
+                                             vuint32m1_t rs2, size_t vl) {
   return __riscv_vloxseg3ei32_v_u32m1x3_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+vuint32m2x3_t test_vloxseg3ei32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd,
+                                             const uint32_t *rs1,
+                                             vuint32m2_t rs2, size_t vl) {
   return __riscv_vloxseg3ei32_v_u32m2x3_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+vuint64m1x3_t test_vloxseg3ei32_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd,
+                                             const uint64_t *rs1,
+                                             vuint32mf2_t rs2, size_t vl) {
   return __riscv_vloxseg3ei32_v_u64m1x3_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+vuint64m2x3_t test_vloxseg3ei32_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd,
+                                             const uint64_t *rs1,
+                                             vuint32m1_t rs2, size_t vl) {
   return __riscv_vloxseg3ei32_v_u64m2x3_mu(vm, vd, rs1, rs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei64.c
index 5bcadc789..84d0507f1 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei64.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei64.c
@@ -1,567 +1,848 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) {
+vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t vd,
+                                                const _Float16 *rs1,
+                                                vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg3ei64_v_f16mf4x3_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) {
+vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t vd,
+                                                const _Float16 *rs1,
+                                                vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg3ei64_v_f16mf2x3_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) {
+vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t vd,
+                                              const _Float16 *rs1,
+                                              vuint64m4_t rs2, size_t vl) {
   return __riscv_vloxseg3ei64_v_f16m1x3_tu(vd, rs1, rs2, vl);
 }

-vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) {
+vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t vd,
+                                              const _Float16 *rs1,
+                                              vuint64m8_t rs2, size_t
vl) { return __riscv_vloxseg3ei64_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f64m1x3_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f64m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i8mf8x3_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i8mf4x3_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i8mf2x3_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i8m1x3_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16mf4x3_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16m1x3_tu(vd, 
rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16m2x3_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i64m1x3_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i64m2x3_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8mf8x3_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8mf4x3_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8mf2x3_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8m1x3_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16mf4x3_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16mf2x3_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t 
test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16m1x3_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16m2x3_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u32mf2x3_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u32m1x3_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u32m2x3_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u64m1x3_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u64m2x3_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tum(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tum(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f16m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f16m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { 
+vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tum(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f32m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f32m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f64m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f64m2x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i8m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, 
const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16m2x3_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i32m1x3_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i32m2x3_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i64m1x3_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i64m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t 
test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u32m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u32m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u64m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u64m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_tumu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_tumu(vbool32_t vm, + vfloat16mf2x3_t vd, + 
const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_tumu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t 
rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t 
test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_tumu(vbool64_t vm, + vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_tumu(vbool32_t vm, + vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_tumu(vbool64_t vm, + vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, + const 
uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei64_v_f16mf4x3_mu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei64_v_f16mf2x3_mu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei64_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f16m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei64_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f16m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei64_v_f32mf2x3_mu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei64_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_f32m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei64_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_f32m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f64m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_f64m2x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t 
vl) { +vint8mf8x3_t test_vloxseg3ei64_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei64_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei64_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei64_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i8m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei64_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei64_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei64_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i16m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei64_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i16m2x3_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei64_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei64_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i32m1x3_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei64_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i32m2x3_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { 
+vint64m1x3_t test_vloxseg3ei64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i64m1x3_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_i64m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei64_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei64_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei64_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei64_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg3ei64_v_u8m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei64_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei64_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei64_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei64_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u16m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei64_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, 
const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei64_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u32m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei64_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u32m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u64m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg3ei64_v_u64m2x3_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei8.c index fa35c4a65..35b4993f7 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg3ei8.c @@ -1,599 +1,888 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16mf4x3_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16mf2x3_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16m1x3_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x3_t
test_vloxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_f64m1x3_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_f64m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i8mf8x3_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i8mf4x3_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i8mf2x3_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i8m1x3_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i8m2x3_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i16mf4x3_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i16m1x3_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, + vuint8m1_t rs2, size_t vl) { return 
__riscv_vloxseg3ei8_v_i16m2x3_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i64m1x3_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i64m2x3_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u8mf8x3_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u8mf4x3_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u8mf2x3_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u8m1x3_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u8m2x3_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16mf4x3_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16mf2x3_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t 
test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16m1x3_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u16m2x3_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u32mf2x3_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u32m1x3_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u32m2x3_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u64m1x3_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u64m2x3_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tum(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tum(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x3_t 
test_vloxseg3ei8_v_f32mf2x3_tum(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_f32m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_f32m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f64m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f64m2x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8m1x3_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8m2x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x3_t 
test_vloxseg3ei8_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i16m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i16m2x3_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i32m1x3_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i32m2x3_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i64m1x3_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i64m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { 
+vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u8m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u32m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u32m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u64m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u64m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t vm, 
vfloat16mf4x3_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_tumu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_tumu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_tumu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return 
__riscv_vloxseg3ei8_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) 
{ return __riscv_vloxseg3ei8_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_tumu(vbool64_t vm, 
vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x3_t test_vloxseg3ei8_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x3_t test_vloxseg3ei8_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x3_t test_vloxseg3ei8_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x3_t test_vloxseg3ei8_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f16m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x3_t test_vloxseg3ei8_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x3_t test_vloxseg3ei8_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_f32m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint8mf2_t 
rs2, size_t vl) { +vfloat32m2x3_t test_vloxseg3ei8_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_f32m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x3_t test_vloxseg3ei8_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_f64m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x3_t test_vloxseg3ei8_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_f64m2x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x3_t test_vloxseg3ei8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x3_t test_vloxseg3ei8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x3_t test_vloxseg3ei8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x3_t test_vloxseg3ei8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8m1x3_mu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x3_t test_vloxseg3ei8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i8m2x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x3_t test_vloxseg3ei8_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x3_t test_vloxseg3ei8_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x3_t test_vloxseg3ei8_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i16m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x3_t 
test_vloxseg3ei8_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i16m2x3_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x3_t test_vloxseg3ei8_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x3_t test_vloxseg3ei8_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i32m1x3_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x3_t test_vloxseg3ei8_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i32m2x3_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x3_t test_vloxseg3ei8_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i64m1x3_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x3_t test_vloxseg3ei8_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_i64m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x3_t test_vloxseg3ei8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x3_t test_vloxseg3ei8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x3_t test_vloxseg3ei8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x3_t test_vloxseg3ei8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u8m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x3_t test_vloxseg3ei8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u8m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x3_t 
test_vloxseg3ei8_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x3_t test_vloxseg3ei8_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x3_t test_vloxseg3ei8_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u16m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x3_t test_vloxseg3ei8_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg3ei8_v_u16m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x3_t test_vloxseg3ei8_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x3_t test_vloxseg3ei8_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u32m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x3_t test_vloxseg3ei8_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u32m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x3_t test_vloxseg3ei8_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u64m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x3_t test_vloxseg3ei8_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg3ei8_v_u64m2x3_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei16.c index c5d3cb641..7e8f5bffc 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei16.c @@ -1,599 +1,894 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const _Float16 *rs1,
vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16mf4x4_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16mf2x4_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16m1x4_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16m2x4_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f32mf2x4_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f32m1x4_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_f32m2x4_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f64m1x4_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f64m2x4_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i8mf8x4_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i8mf4x4_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i8mf2x4_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t 
*rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i8m1x4_tu(vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i8m2x4_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16mf4x4_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16mf2x4_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16m1x4_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16m2x4_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i32mf2x4_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i32m1x4_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i32m2x4_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i64m1x4_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i64m2x4_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8mf8x4_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x4_t 
test_vloxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8mf4x4_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8mf2x4_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8m1x4_tu(vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8m2x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u32mf2x4_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u32m1x4_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u32m2x4_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u64m1x4_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { 
+vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u64m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tum(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tum(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tum(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f32m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f32m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f64m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f64m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t 
test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i8m1x4_tum(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i8m2x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i32m2x4_tum(vm, vd, rs1, 
rs2, vl); } -vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_u8m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint16m2_t 
rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_tumu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_tumu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_tumu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, 
const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } 
-vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, 
size_t vl) { return __riscv_vloxseg4ei16_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_tumu(vbool64_t vm, + vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_tumu(vbool32_t vm, + vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_tumu(vbool64_t vm, + vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, 
vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei16_v_f16mf4x4_mu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei16_v_f16mf2x4_mu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei16_v_f32mf2x4_mu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei16_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei16_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei16_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f64m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei16_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_f64m2x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei16_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei16_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t 
test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei16_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei16_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x4_t test_vloxseg4ei16_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i8m2x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei16_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei16_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i32m1x4_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei16_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_i32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei16_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i64m1x4_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t 
test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei16_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_i64m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei16_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei16_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei16_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei16_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_u8m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x4_t test_vloxseg4ei16_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei16_v_u8m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u16m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei16_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return 
__riscv_vloxseg4ei16_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei16_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u32m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei16_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u32m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei16_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u64m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei16_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei16_v_u64m2x4_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei32.c index b8b35d830..94704fca0 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei32.c @@ -1,599 +1,894 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16mf4x4_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16mf2x4_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16m1x4_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16m2x4_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f32mf2x4_tu(vd, rs1, rs2, vl); }
-vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_f32m1x4_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_f32m2x4_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f64m1x4_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f64m2x4_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i8mf8x4_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i8mf4x4_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i8mf2x4_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i8m1x4_tu(vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i8m2x4_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16mf4x4_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16mf2x4_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16m1x4_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t vd, const 
int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16m2x4_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i32mf2x4_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i32m1x4_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i32m2x4_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i64m1x4_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i64m2x4_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8mf8x4_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8mf4x4_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8mf2x4_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8m1x4_tu(vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8m2x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { 
+vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u32mf2x4_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u32m1x4_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u32m2x4_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u64m1x4_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u64m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tum(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tum(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t 
vl) { return __riscv_vloxseg4ei32_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tum(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f32m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f32m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f64m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f64m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i8m1x4_tum(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i8m2x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, + const 
int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x4_t 
test_vloxseg4ei32_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_u8m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t 
vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_tumu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_tumu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_tumu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, + 
vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x4_t 
test_vloxseg4ei32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_tumu(vbool64_t vm, + vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_tumu(vbool32_t vm, + vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t 
test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_tumu(vbool64_t vm, + vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei32_v_f16mf4x4_mu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei32_v_f16mf2x4_mu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei32_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei32_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei32_v_f32mf2x4_mu(vbool64_t vm, + 
vfloat32mf2x4_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei32_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f64m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei32_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_f64m2x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei32_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei32_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei32_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei32_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x4_t test_vloxseg4ei32_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i8m2x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei32_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei32_v_i16mf2x4_mu(vbool32_t vm, 
vint16mf2x4_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei32_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei32_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i32m1x4_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei32_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_i64m1x4_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei32_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei32_v_i64m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei32_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei32_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei32_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg4ei32_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x4_t 
test_vloxseg4ei32_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd,
+                                           const uint8_t *rs1, vuint32m4_t rs2,
+                                           size_t vl) {
   return __riscv_vloxseg4ei32_v_u8m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) {
+vuint8m2x4_t test_vloxseg4ei32_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd,
+                                           const uint8_t *rs1, vuint32m8_t rs2,
+                                           size_t vl) {
   return __riscv_vloxseg4ei32_v_u8m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) {
+vuint16mf4x4_t test_vloxseg4ei32_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd,
+                                               const uint16_t *rs1,
+                                               vuint32mf2_t rs2, size_t vl) {
   return __riscv_vloxseg4ei32_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+vuint16mf2x4_t test_vloxseg4ei32_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd,
+                                               const uint16_t *rs1,
+                                               vuint32m1_t rs2, size_t vl) {
   return __riscv_vloxseg4ei32_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+vuint16m1x4_t test_vloxseg4ei32_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd,
+                                             const uint16_t *rs1,
+                                             vuint32m2_t rs2, size_t vl) {
   return __riscv_vloxseg4ei32_v_u16m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) {
+vuint16m2x4_t test_vloxseg4ei32_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd,
+                                             const uint16_t *rs1,
+                                             vuint32m4_t rs2, size_t vl) {
   return __riscv_vloxseg4ei32_v_u16m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+vuint32mf2x4_t test_vloxseg4ei32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd,
+                                               const uint32_t *rs1,
+                                               vuint32mf2_t rs2, size_t vl) {
   return __riscv_vloxseg4ei32_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+vuint32m1x4_t test_vloxseg4ei32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd,
+                                             const uint32_t *rs1,
+                                             vuint32m1_t rs2, size_t vl) {
   return __riscv_vloxseg4ei32_v_u32m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) {
+vuint32m2x4_t test_vloxseg4ei32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd,
+                                             const uint32_t *rs1,
+                                             vuint32m2_t rs2, size_t vl) {
   return __riscv_vloxseg4ei32_v_u32m2x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+vuint64m1x4_t test_vloxseg4ei32_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd,
+                                             const uint64_t *rs1,
+                                             vuint32mf2_t rs2, size_t vl) {
   return __riscv_vloxseg4ei32_v_u64m1x4_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) {
+vuint64m2x4_t test_vloxseg4ei32_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd,
+                                             const uint64_t *rs1,
+                                             vuint32m1_t rs2, size_t vl) {
   return __riscv_vloxseg4ei32_v_u64m2x4_mu(vm, vd, rs1, rs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei64.c
index d7a56546a..68b557ed3 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei64.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei64.c
@@ -1,567 +1,848 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) {
+vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t vd,
+                                                const _Float16 *rs1,
+                                                vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg4ei64_v_f16mf4x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) {
+vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t vd,
+                                                const _Float16 *rs1,
+                                                vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg4ei64_v_f16mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) {
+vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t vd,
+                                              const _Float16 *rs1,
+                                              vuint64m4_t rs2, size_t vl) {
   return __riscv_vloxseg4ei64_v_f16m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) {
+vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t vd,
+                                              const _Float16 *rs1,
+                                              vuint64m8_t rs2, size_t vl) {
   return __riscv_vloxseg4ei64_v_f16m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) {
+vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t vd,
+                                                const float *rs1,
+                                                vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg4ei64_v_f32mf2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) {
+vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t vd,
+                                              const float *rs1, vuint64m2_t rs2,
+                                              size_t vl) {
   return __riscv_vloxseg4ei64_v_f32m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) {
+vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t vd,
+                                              const float *rs1, vuint64m4_t rs2,
+                                              size_t vl) {
   return __riscv_vloxseg4ei64_v_f32m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) {
+vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t vd,
+                                              const double *rs1,
+                                              vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg4ei64_v_f64m1x4_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) {
+vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t vd,
+                                              const double *rs1,
+                                              vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg4ei64_v_f64m2x4_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1,
+                                            vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg4ei64_v_i8mf8x4_tu(vd,
rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i8mf4x4_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i8mf2x4_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i8m1x4_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16mf4x4_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16mf2x4_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16m1x4_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16m2x4_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i32mf2x4_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i32m1x4_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i32m2x4_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i64m1x4_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i64m2x4_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t vd, 
const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8mf8x4_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8mf4x4_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8mf2x4_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8m1x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u32mf2x4_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u32m1x4_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u32m2x4_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u64m1x4_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t 
vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u64m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tum(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tum(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tum(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f32m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f32m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f64m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f64m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i8mf8x4_tum(vm, 
vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i8m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return 
__riscv_vloxseg4ei64_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tum(vbool32_t 
vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_tumu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_tumu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_tumu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t 
test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return 
__riscv_vloxseg4ei64_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_tumu(vbool64_t vm, + vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_tumu(vbool32_t vm, + vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x4_t 
test_vloxseg4ei64_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_tumu(vbool64_t vm, + vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei64_v_f16mf4x4_mu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei64_v_f16mf2x4_mu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei64_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei64_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } 
-vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei64_v_f32mf2x4_mu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei64_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei64_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f64m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_f64m2x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei64_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei64_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei64_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei64_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei64_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei64_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i16mf2x4_mu(vm, vd, 
rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei64_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei64_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei64_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei64_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i32m1x4_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei64_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i64m1x4_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_i64m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei64_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei64_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei64_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei64_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg4ei64_v_u8m1x4_mu(vm, vd, 
rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei64_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei64_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei64_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei64_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u16m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei64_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei64_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u32m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei64_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u32m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u64m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg4ei64_v_u64m2x4_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei8.c index 2d9ab3702..1901fc16f 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg4ei8.c @@ -1,599 +1,888 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o 
- | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16mf4x4_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16mf2x4_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16m1x4_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16m2x4_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f32mf2x4_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_f32m1x4_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_f32m2x4_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_f64m1x4_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_f64m2x4_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i8mf8x4_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i8mf4x4_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return
__riscv_vloxseg4ei8_v_i8mf2x4_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i8m1x4_tu(vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i8m2x4_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i16mf4x4_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i16mf2x4_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i16m1x4_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i16m2x4_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i32mf2x4_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i32m1x4_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i32m2x4_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i64m1x4_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i64m2x4_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u8mf8x4_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const 
uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u8mf4x4_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u8mf2x4_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u8m1x4_tu(vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u8m2x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u32mf2x4_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u32m1x4_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u32m2x4_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u64m1x4_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { 
+vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u64m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tum(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tum(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tum(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_f32m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_f32m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f64m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f64m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t vm, 
vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8m1x4_tum(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8m2x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const 
int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u8m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t vm, 
vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_tumu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_tumu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_tumu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return 
__riscv_vloxseg4ei8_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, 
size_t vl) { return __riscv_vloxseg4ei8_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t 
*rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x4_t test_vloxseg4ei8_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint8mf4_t rs2, 
size_t vl) { +vfloat16mf2x4_t test_vloxseg4ei8_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x4_t test_vloxseg4ei8_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x4_t test_vloxseg4ei8_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x4_t test_vloxseg4ei8_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x4_t test_vloxseg4ei8_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x4_t test_vloxseg4ei8_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x4_t test_vloxseg4ei8_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_f64m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x4_t test_vloxseg4ei8_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_f64m2x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x4_t test_vloxseg4ei8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x4_t test_vloxseg4ei8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x4_t test_vloxseg4ei8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, 
size_t vl) { +vint8m1x4_t test_vloxseg4ei8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x4_t test_vloxseg4ei8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i8m2x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x4_t test_vloxseg4ei8_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x4_t test_vloxseg4ei8_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x4_t test_vloxseg4ei8_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x4_t test_vloxseg4ei8_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x4_t test_vloxseg4ei8_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x4_t test_vloxseg4ei8_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i32m1x4_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x4_t test_vloxseg4ei8_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x4_t test_vloxseg4ei8_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i64m1x4_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x4_t test_vloxseg4ei8_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_i64m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x4_t 
test_vloxseg4ei8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x4_t test_vloxseg4ei8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x4_t test_vloxseg4ei8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x4_t test_vloxseg4ei8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u8m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x4_t test_vloxseg4ei8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u8m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x4_t test_vloxseg4ei8_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x4_t test_vloxseg4ei8_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x4_t test_vloxseg4ei8_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u16m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x4_t test_vloxseg4ei8_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg4ei8_v_u16m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x4_t test_vloxseg4ei8_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x4_t test_vloxseg4ei8_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u32m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { 
+vuint32m2x4_t test_vloxseg4ei8_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u32m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x4_t test_vloxseg4ei8_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u64m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x4_t test_vloxseg4ei8_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg4ei8_v_u64m2x4_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei16.c index fe3e4ab49..7f921ccb0 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei16.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f16mf4x5_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f16mf2x5_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f16m1x5_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return
__riscv_vloxseg5ei16_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8mf2x5_tu(vd, rs1, rs2, 
vl); } -vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tum(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tum(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tum(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const 
float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei16_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei16_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t 
test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei16_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + 
vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_tumu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_tumu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_tumu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei16_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, 
vuint16mf4_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_tumu(vbool64_t vm, + vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u16mf4x5_tumu(vm, vd, rs1, 
rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_tumu(vbool32_t vm, + vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_tumu(vbool64_t vm, + vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei16_v_f16mf4x5_mu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei16_v_f16mf2x5_mu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei16_v_f32mf2x5_mu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei16_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x5_t 
test_vloxseg5ei16_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei16_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxseg5ei16_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei16_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxseg5ei16_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei16_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei16_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei16_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei16_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei16_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei16_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei16_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei16_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { 
+vuint8mf8x5_t test_vloxseg5ei16_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei16_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei16_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei16_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei16_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei16_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei16_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u32m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei16_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei16_v_u64m1x5_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei32.c index 6ed9764bb..02aabba41 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei32.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 
-target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f16mf4x5_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f16mf2x5_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f16m1x5_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t
test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t 
test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tum(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tum(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tum(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_i8mf4x5_tum(vm, vd, rs1, rs2, 
vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return 
__riscv_vloxseg5ei32_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_tumu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_tumu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint32mf2_t rs2, size_t 
vl) { +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_tumu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t 
test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_tumu(vbool64_t vm, + vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_tumu(vbool32_t vm, + vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_tumu(vbool64_t vm, + vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, + const 
uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei32_v_f16mf4x5_mu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei32_v_f16mf2x5_mu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei32_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei32_v_f32mf2x5_mu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei32_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei32_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei32_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei32_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { 
+vint8m1x5_t test_vloxseg5ei32_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei32_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei32_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei32_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei32_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei32_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei32_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei32_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg5ei32_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei32_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg5ei32_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, 
vuint32mf2_t rs2, size_t vl) {
+vuint16mf4x5_t test_vloxseg5ei32_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd,
+                                               const uint16_t *rs1,
+                                               vuint32mf2_t rs2, size_t vl) {
   return __riscv_vloxseg5ei32_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) {
+vuint16mf2x5_t test_vloxseg5ei32_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd,
+                                               const uint16_t *rs1,
+                                               vuint32m1_t rs2, size_t vl) {
   return __riscv_vloxseg5ei32_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) {
+vuint16m1x5_t test_vloxseg5ei32_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd,
+                                             const uint16_t *rs1,
+                                             vuint32m2_t rs2, size_t vl) {
   return __riscv_vloxseg5ei32_v_u16m1x5_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) {
+vuint32mf2x5_t test_vloxseg5ei32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd,
+                                               const uint32_t *rs1,
+                                               vuint32mf2_t rs2, size_t vl) {
   return __riscv_vloxseg5ei32_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) {
+vuint32m1x5_t test_vloxseg5ei32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd,
+                                             const uint32_t *rs1,
+                                             vuint32m1_t rs2, size_t vl) {
   return __riscv_vloxseg5ei32_v_u32m1x5_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) {
+vuint64m1x5_t test_vloxseg5ei32_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd,
+                                             const uint64_t *rs1,
+                                             vuint32mf2_t rs2, size_t vl) {
   return __riscv_vloxseg5ei32_v_u64m1x5_mu(vm, vd, rs1, rs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei64.c
index f01b1e491..b5d4130ff 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei64.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei64.c
@@ -1,423 +1,635 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) {
+vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t vd,
+                                                const _Float16 *rs1,
+                                                vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg5ei64_v_f16mf4x5_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) {
+vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t vd,
+                                                const _Float16 *rs1,
+                                                vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg5ei64_v_f16mf2x5_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) {
+vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t vd,
+                                              const _Float16 *rs1,
+                                              vuint64m4_t rs2, size_t vl) {
   return __riscv_vloxseg5ei64_v_f16m1x5_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float *rs1,
vuint64m1_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x5_t 
test_vloxseg5ei64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tum(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, 
const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tum(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tum(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t 
test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return 
__riscv_vloxseg5ei64_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_tumu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_tumu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_tumu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { 
+vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t 
vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_tumu(vbool64_t vm, + vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_tumu(vbool32_t vm, + vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_tumu(vbool64_t vm, + vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei64_v_f16mf4x5_mu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei64_v_f16mf2x5_mu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei64_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + 
vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei64_v_f32mf2x5_mu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei64_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei64_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei64_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei64_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei64_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei64_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei64_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei64_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei64_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, + const 
int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei64_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei64_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei64_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei64_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei64_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg5ei64_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei64_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei64_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei64_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei64_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg5ei64_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x5_t 
test_vloxseg5ei64_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd,
+                                             const uint32_t *rs1,
+                                             vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg5ei64_v_u32m1x5_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+vuint64m1x5_t test_vloxseg5ei64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd,
+                                             const uint64_t *rs1,
+                                             vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg5ei64_v_u64m1x5_mu(vm, vd, rs1, rs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei8.c
index ea5fca199..13c16988a 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei8.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg5ei8.c
@@ -1,423 +1,629 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t vd,
+                                               const _Float16 *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg5ei8_v_f16mf4x5_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) {
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t vd,
+                                               const _Float16 *rs1,
+                                               vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg5ei8_v_f16mf2x5_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) {
+vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t vd,
+                                             const _Float16 *rs1,
+                                             vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg5ei8_v_f16m1x5_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t vd,
+                                               const float *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg5ei8_v_f32mf2x5_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) {
+vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t vd,
+                                             const float *rs1, vuint8mf4_t rs2,
+                                             size_t vl) {
   return __riscv_vloxseg5ei8_v_f32m1x5_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t vd,
+                                             const double *rs1, vuint8mf8_t rs2,
+                                             size_t vl) {
   return __riscv_vloxseg5ei8_v_f64m1x5_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1,
+                                           vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg5ei8_v_i8mf8x5_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1,
+                                           vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg5ei8_v_i8mf4x5_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x5_t
test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { 
+vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tum(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tum(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tum(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, + const 
double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t 
*rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_tumu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { 
+vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_tumu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_tumu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t vm, 
vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u16m1x5_tumu(vm, vd, rs1, rs2, 
vl); } -vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x5_t test_vloxseg5ei8_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x5_t test_vloxseg5ei8_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x5_t test_vloxseg5ei8_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x5_t test_vloxseg5ei8_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x5_t test_vloxseg5ei8_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x5_t test_vloxseg5ei8_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x5_t test_vloxseg5ei8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x5_t test_vloxseg5ei8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { 
return __riscv_vloxseg5ei8_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x5_t test_vloxseg5ei8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x5_t test_vloxseg5ei8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x5_t test_vloxseg5ei8_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x5_t test_vloxseg5ei8_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x5_t test_vloxseg5ei8_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x5_t test_vloxseg5ei8_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x5_t test_vloxseg5ei8_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x5_t test_vloxseg5ei8_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x5_t test_vloxseg5ei8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x5_t test_vloxseg5ei8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x5_t test_vloxseg5ei8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return 
__riscv_vloxseg5ei8_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x5_t test_vloxseg5ei8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg5ei8_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x5_t test_vloxseg5ei8_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x5_t test_vloxseg5ei8_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x5_t test_vloxseg5ei8_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x5_t test_vloxseg5ei8_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x5_t test_vloxseg5ei8_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u32m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x5_t test_vloxseg5ei8_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg5ei8_v_u64m1x5_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei16.c index 4201a2c15..5ef719e68 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei16.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f16mf4x6_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) {
return __riscv_vloxseg6ei16_v_f16mf2x6_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f16m1x6_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f32mf2x6_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f32m1x6_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f64m1x6_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return 
__riscv_vloxseg6ei16_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { 
return __riscv_vloxseg6ei16_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tum(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tum(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tum(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei16_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei16_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x6_t 
test_vloxseg6ei16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei16_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t vm, 
vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_tumu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_tumu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_tumu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, + const 
double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei16_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { 
+vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_tumu(vbool64_t vm, + vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_tumu(vbool32_t vm, + vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_tumu(vbool64_t vm, + vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei16_v_f16mf4x6_mu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f16mf4x6_mu(vm, 
vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei16_v_f16mf2x6_mu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei16_v_f32mf2x6_mu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei16_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei16_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei16_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxseg6ei16_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei16_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxseg6ei16_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei16_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei16_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei16_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei16_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t 
vl) { return __riscv_vloxseg6ei16_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei16_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei16_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei16_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei16_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei16_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei16_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei16_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei16_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei16_v_u8m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, 
+ const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u16m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei16_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei16_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u32m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei16_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei16_v_u64m1x6_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei32.c index 49986992e..a80125ce8 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei32.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f16mf4x6_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f16mf2x6_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f16m1x6_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f32mf2x6_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_f32m1x6_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return
__riscv_vloxseg6ei32_v_f64m1x6_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8mf4x6_tu(vd, rs1, rs2, vl); } 
-vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tum(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tum(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { 
+vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tum(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t 
vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u32m1x6_tum(vm, vd, 
rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_tumu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_tumu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_tumu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_tumu(vbool8_t 
vm, vint8m1x6_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t 
*rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_tumu(vbool64_t vm, + vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_tumu(vbool32_t vm, + vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_tumu(vbool64_t vm, + vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei32_v_f16mf4x6_mu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei32_v_f16mf2x6_mu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei32_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei32_v_f32mf2x6_mu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { 
return __riscv_vloxseg6ei32_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei32_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei32_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei32_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei32_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei32_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei32_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei32_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei32_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei32_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, 
size_t vl) { return __riscv_vloxseg6ei32_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei32_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei32_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei32_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei32_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg6ei32_v_u8m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei32_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei32_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei32_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u16m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u32m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei32_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei32_v_u64m1x6_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei64.c index d0c41237b..0b150b8cd 100644 --- 
a/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei64.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f16mf4x6_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f16mf2x6_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f16m1x6_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f32mf2x6_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_f32m1x6_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f64m1x6_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x6_t 
test_vloxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tum(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tum(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tum(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x6_t 
test_vloxseg6ei64_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, 
vuint64m4_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_tumu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_tumu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return 
__riscv_vloxseg6ei64_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_tumu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_tumu(vbool64_t vm, 
vint32mf2x6_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_tumu(vbool64_t vm, + vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_tumu(vbool32_t vm, + vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_tumu(vbool64_t vm, + vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t 
vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei64_v_f16mf4x6_mu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei64_v_f16mf2x6_mu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei64_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei64_v_f32mf2x6_mu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei64_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei64_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei64_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei64_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } 
-vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei64_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei64_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei64_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei64_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei64_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei64_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei64_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei64_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei64_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei64_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg6ei64_v_u8m1x6_mu(vm, vd, rs1, rs2, 
vl); } -vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei64_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei64_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei64_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u16m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei64_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei64_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u32m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg6ei64_v_u64m1x6_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei8.c index 4166f37ad..21d1d7b96 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg6ei8.c @@ -1,423 +1,629 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f16mf4x6_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f16mf2x6_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f16m1x6_tu(vd, rs1, rs2, vl); }
-vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f32mf2x6_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_f32m1x6_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_f64m1x6_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, 
size_t vl) { +vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tum(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const 
_Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tum(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tum(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t 
vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t 
test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_tumu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_tumu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_tumu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint8mf4_t 
rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t 
vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x6_t test_vloxseg6ei8_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x6_t test_vloxseg6ei8_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x6_t test_vloxseg6ei8_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, 
vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x6_t test_vloxseg6ei8_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x6_t test_vloxseg6ei8_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x6_t test_vloxseg6ei8_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x6_t test_vloxseg6ei8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x6_t test_vloxseg6ei8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x6_t test_vloxseg6ei8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x6_t test_vloxseg6ei8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x6_t test_vloxseg6ei8_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x6_t test_vloxseg6ei8_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x6_t test_vloxseg6ei8_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x6_t test_vloxseg6ei8_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) 
{ +vint32m1x6_t test_vloxseg6ei8_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x6_t test_vloxseg6ei8_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x6_t test_vloxseg6ei8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x6_t test_vloxseg6ei8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x6_t test_vloxseg6ei8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x6_t test_vloxseg6ei8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg6ei8_v_u8m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x6_t test_vloxseg6ei8_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x6_t test_vloxseg6ei8_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x6_t test_vloxseg6ei8_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u16m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x6_t test_vloxseg6ei8_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x6_t test_vloxseg6ei8_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg6ei8_v_u32m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t 
rs2, size_t vl) {
+vuint64m1x6_t test_vloxseg6ei8_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd,
+                                            const uint64_t *rs1,
+                                            vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg6ei8_v_u64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei16.c
index f29377190..a5f3e9cbc 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei16.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei16.c
@@ -1,423 +1,635 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) {
+vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t vd,
+                                                const _Float16 *rs1,
+                                                vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei16_v_f16mf4x7_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) {
+vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t vd,
+                                                const _Float16 *rs1,
+                                                vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei16_v_f16mf2x7_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) {
+vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t vd,
+                                              const _Float16 *rs1,
+                                              vuint16m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei16_v_f16m1x7_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) {
+vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t vd,
+                                                const float *rs1,
+                                                vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei16_v_f32mf2x7_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) {
+vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t vd,
+                                              const float *rs1,
+                                              vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei16_v_f32m1x7_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) {
+vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t vd,
+                                              const double *rs1,
+                                              vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei16_v_f64m1x7_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1,
+                                            vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei16_v_i8mf8x7_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1,
+                                            vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei16_v_i8mf4x7_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1,
+                                            vuint16m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei16_v_i8mf2x7_tu(vd, rs1, rs2, vl);
 }
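For readers skimming these reformatted hunks: every vloxseg policy intrinsic follows the same argument order, destination tuple (vd), base pointer (rs1), index vector of byte offsets (rs2), and element count (vl), with the masked variants (_tum, _tumu, _mu) prepending the mask (vm). A minimal caller sketch, assuming a hypothetical wrapper name and data layout; the intrinsic and its signature are taken verbatim from the hunk above:

#include <riscv_vector.h>

// Sketch only: `load_seven_int8_fields` is a hypothetical caller, not part
// of the generated tests. It gathers seven consecutive int8 fields per
// element from `src`, addressed by the byte offsets in `idx`; under the
// tail-undisturbed (_tu) policy, tail elements keep the values already
// present in the `vd` tuple.
vint8mf2x7_t load_seven_int8_fields(vint8mf2x7_t vd, const int8_t *src,
                                    vuint16m1_t idx, size_t vl) {
  return __riscv_vloxseg7ei16_v_i8mf2x7_tu(vd, src, idx, vl);
}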
-vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i8m1x7_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i16mf4x7_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i16mf2x7_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i16m1x7_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i32mf2x7_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u16mf4x7_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t 
test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tum(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tum(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f16m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tum(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f32m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t 
test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg7ei16_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg7ei16_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i16m1x7_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i32m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return 
__riscv_vloxseg7ei16_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg7ei16_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u16m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u32m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_tumu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { 
+vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_tumu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_tumu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg7ei16_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t 
test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_tumu(vbool64_t vm, + vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_tumu(vbool32_t vm, + vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t 
*rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_tumu(vbool64_t vm, + vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x7_t test_vloxseg7ei16_v_f16mf4x7_mu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x7_t test_vloxseg7ei16_v_f16mf2x7_mu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x7_t test_vloxseg7ei16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f16m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x7_t test_vloxseg7ei16_v_f32mf2x7_mu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x7_t test_vloxseg7ei16_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f32m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x7_t test_vloxseg7ei16_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_f64m1x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x7_t test_vloxseg7ei16_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxseg7ei16_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, 
vuint16mf2_t rs2, size_t vl) { +vint8mf4x7_t test_vloxseg7ei16_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxseg7ei16_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x7_t test_vloxseg7ei16_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg7ei16_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x7_t test_vloxseg7ei16_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg7ei16_v_i8m1x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x7_t test_vloxseg7ei16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x7_t test_vloxseg7ei16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x7_t test_vloxseg7ei16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg7ei16_v_i16m1x7_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x7_t test_vloxseg7ei16_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x7_t test_vloxseg7ei16_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i32m1x7_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x7_t test_vloxseg7ei16_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_i64m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x7_t test_vloxseg7ei16_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x7_t test_vloxseg7ei16_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, 
const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x7_t test_vloxseg7ei16_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x7_t test_vloxseg7ei16_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg7ei16_v_u8m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x7_t test_vloxseg7ei16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x7_t test_vloxseg7ei16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x7_t test_vloxseg7ei16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u16m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x7_t test_vloxseg7ei16_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x7_t test_vloxseg7ei16_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u32m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x7_t test_vloxseg7ei16_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg7ei16_v_u64m1x7_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei32.c index cb724a9f7..e1a0b5c79 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei32.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f16mf4x7_tu(vd, rs1, rs2, vl); } -vfloat16mf2x7_t
test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f16mf2x7_tu(vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f16m1x7_tu(vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f32mf2x7_tu(vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_f32m1x7_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f64m1x7_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i8mf8x7_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i8mf4x7_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i8mf2x7_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i8m1x7_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i16mf4x7_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i16mf2x7_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i16m1x7_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t 
test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i32mf2x7_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u16mf4x7_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t 
test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tum(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tum(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f16m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tum(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f32m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return 
__riscv_vloxseg7ei32_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i16m1x7_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i32m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tum(vbool64_t vm, 
vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u16m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u32m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_tumu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_tumu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_tumu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t 
test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return 
__riscv_vloxseg7ei32_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_tumu(vbool64_t vm, + vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_tumu(vbool32_t vm, + vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_tumu(vbool64_t vm, + vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { 
+vfloat16mf4x7_t test_vloxseg7ei32_v_f16mf4x7_mu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x7_t test_vloxseg7ei32_v_f16mf2x7_mu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x7_t test_vloxseg7ei32_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f16m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x7_t test_vloxseg7ei32_v_f32mf2x7_mu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x7_t test_vloxseg7ei32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_f32m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x7_t test_vloxseg7ei32_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_f64m1x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x7_t test_vloxseg7ei32_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x7_t test_vloxseg7ei32_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x7_t test_vloxseg7ei32_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x7_t test_vloxseg7ei32_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_i8m1x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x7_t test_vloxseg7ei32_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t 
vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x7_t test_vloxseg7ei32_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x7_t test_vloxseg7ei32_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_i16m1x7_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x7_t test_vloxseg7ei32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x7_t test_vloxseg7ei32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_i32m1x7_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x7_t test_vloxseg7ei32_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_i64m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x7_t test_vloxseg7ei32_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x7_t test_vloxseg7ei32_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x7_t test_vloxseg7ei32_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x7_t test_vloxseg7ei32_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg7ei32_v_u8m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x7_t test_vloxseg7ei32_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x7_t test_vloxseg7ei32_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t 
test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x7_t test_vloxseg7ei32_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u16m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x7_t test_vloxseg7ei32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x7_t test_vloxseg7ei32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u32m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x7_t test_vloxseg7ei32_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg7ei32_v_u64m1x7_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei64.c index 532c4085a..d6360f176 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei64.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f16mf4x7_tu(vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f16mf2x7_tu(vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f16m1x7_tu(vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f32mf2x7_tu(vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg7ei64_v_f32m1x7_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t vd, const double
*rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f64m1x7_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i8mf8x7_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i8mf4x7_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i8mf2x7_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i8m1x7_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i16mf4x7_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i16mf2x7_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i16m1x7_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i32mf2x7_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x7_t 
test_vloxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u16mf4x7_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tum(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tum(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f16m1x7_tum(vm, vd, rs1, 
rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tum(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f32m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg7ei64_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg7ei64_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg7ei64_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg7ei64_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i16m1x7_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return 
__riscv_vloxseg7ei64_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i32m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg7ei64_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u16m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tum(vbool32_t vm, 
vuint32m1x7_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u32m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_tumu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_tumu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_tumu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg7ei64_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t vm, 
vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd,
+                                            const int8_t *rs1, vuint64m8_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei64_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd,
+                                                const int16_t *rs1,
+                                                vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd,
+                                                const int16_t *rs1,
+                                                vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd,
+                                              const int16_t *rs1,
+                                              vuint64m4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd,
+                                                const int32_t *rs1,
+                                                vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd,
+                                              const int32_t *rs1,
+                                              vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd,
+                                              const int64_t *rs1,
+                                              vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd,
+                                               const uint8_t *rs1,
+                                               vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd,
+                                               const uint8_t *rs1,
+                                               vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd,
+                                               const uint8_t *rs1,
+                                               vuint64m4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd,
+                                             const uint8_t *rs1,
+                                             vuint64m8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_tumu(vbool64_t vm,
+                                                 vuint16mf4x7_t vd,
+                                                 const uint16_t *rs1,
+                                                 vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_tumu(vbool32_t vm,
+                                                 vuint16mf2x7_t vd,
+                                                 const uint16_t *rs1,
+                                                 vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd,
+                                               const uint16_t *rs1,
+                                               vuint64m4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_tumu(vbool64_t vm,
+                                                 vuint32mf2x7_t vd,
+                                                 const uint32_t *rs1,
+                                                 vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd,
+                                               const uint32_t *rs1,
+                                               vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd,
+                                               const uint64_t *rs1,
+                                               vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) {
+vfloat16mf4x7_t test_vloxseg7ei64_v_f16mf4x7_mu(vbool64_t vm,
+                                                vfloat16mf4x7_t vd,
+                                                const _Float16 *rs1,
+                                                vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) {
+vfloat16mf2x7_t test_vloxseg7ei64_v_f16mf2x7_mu(vbool32_t vm,
+                                                vfloat16mf2x7_t vd,
+                                                const _Float16 *rs1,
+                                                vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) {
+vfloat16m1x7_t test_vloxseg7ei64_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd,
+                                              const _Float16 *rs1,
+                                              vuint64m4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_f16m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) {
+vfloat32mf2x7_t test_vloxseg7ei64_v_f32mf2x7_mu(vbool64_t vm,
+                                                vfloat32mf2x7_t vd,
+                                                const float *rs1,
+                                                vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) {
+vfloat32m1x7_t test_vloxseg7ei64_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd,
+                                              const float *rs1, vuint64m2_t rs2,
+                                              size_t vl) {
   return __riscv_vloxseg7ei64_v_f32m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) {
+vfloat64m1x7_t test_vloxseg7ei64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd,
+                                              const double *rs1,
+                                              vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_f64m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) {
+vint8mf8x7_t test_vloxseg7ei64_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd,
+                                            const int8_t *rs1, vuint64m1_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei64_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) {
+vint8mf4x7_t test_vloxseg7ei64_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd,
+                                            const int8_t *rs1, vuint64m2_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei64_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) {
+vint8mf2x7_t test_vloxseg7ei64_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd,
+                                            const int8_t *rs1, vuint64m4_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei64_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) {
+vint8m1x7_t test_vloxseg7ei64_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd,
+                                          const int8_t *rs1, vuint64m8_t rs2,
+                                          size_t vl) {
   return __riscv_vloxseg7ei64_v_i8m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) {
+vint16mf4x7_t test_vloxseg7ei64_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd,
+                                              const int16_t *rs1,
+                                              vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) {
+vint16mf2x7_t test_vloxseg7ei64_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd,
+                                              const int16_t *rs1,
+                                              vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) {
+vint16m1x7_t test_vloxseg7ei64_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd,
+                                            const int16_t *rs1, vuint64m4_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei64_v_i16m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) {
+vint32mf2x7_t test_vloxseg7ei64_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd,
+                                              const int32_t *rs1,
+                                              vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) {
+vint32m1x7_t test_vloxseg7ei64_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd,
+                                            const int32_t *rs1, vuint64m2_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei64_v_i32m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) {
+vint64m1x7_t test_vloxseg7ei64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd,
+                                            const int64_t *rs1, vuint64m1_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei64_v_i64m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) {
+vuint8mf8x7_t test_vloxseg7ei64_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd,
+                                             const uint8_t *rs1,
+                                             vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) {
+vuint8mf4x7_t test_vloxseg7ei64_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd,
+                                             const uint8_t *rs1,
+                                             vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) {
+vuint8mf2x7_t test_vloxseg7ei64_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd,
+                                             const uint8_t *rs1,
+                                             vuint64m4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) {
+vuint8m1x7_t test_vloxseg7ei64_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd,
+                                           const uint8_t *rs1, vuint64m8_t rs2,
+                                           size_t vl) {
   return __riscv_vloxseg7ei64_v_u8m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) {
+vuint16mf4x7_t test_vloxseg7ei64_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd,
+                                               const uint16_t *rs1,
+                                               vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) {
+vuint16mf2x7_t test_vloxseg7ei64_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd,
+                                               const uint16_t *rs1,
+                                               vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+vuint16m1x7_t test_vloxseg7ei64_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd,
+                                             const uint16_t *rs1,
+                                             vuint64m4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u16m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+vuint32mf2x7_t test_vloxseg7ei64_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd,
+                                               const uint32_t *rs1,
+                                               vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+vuint32m1x7_t test_vloxseg7ei64_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd,
+                                             const uint32_t *rs1,
+                                             vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u32m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+vuint64m1x7_t test_vloxseg7ei64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd,
+                                             const uint64_t *rs1,
+                                             vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei64_v_u64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei8.c
index 6f569d4ff..b18436bba 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei8.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg7ei8.c
@@ -1,423 +1,629 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t vd,
+                                               const _Float16 *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f16mf4x7_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) {
+vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t vd,
+                                               const _Float16 *rs1,
+                                               vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f16mf2x7_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) {
+vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t vd,
+                                             const _Float16 *rs1,
+                                             vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f16m1x7_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t vd,
+                                               const float *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f32mf2x7_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) {
+vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t vd,
+                                             const float *rs1, vuint8mf4_t rs2,
+                                             size_t vl) {
   return __riscv_vloxseg7ei8_v_f32m1x7_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t vd,
+                                             const double *rs1, vuint8mf8_t rs2,
+                                             size_t vl) {
   return __riscv_vloxseg7ei8_v_f64m1x7_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1,
+                                           vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i8mf8x7_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1,
+                                           vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i8mf4x7_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1,
+                                           vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i8mf2x7_tu(vd, rs1, rs2, vl);
 }

-vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1,
+                                         vuint8m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i8m1x7_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t vd,
+                                             const int16_t *rs1,
+                                             vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i16mf4x7_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t vd,
+                                             const int16_t *rs1,
+                                             vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i16mf2x7_tu(vd, rs1, rs2, vl);
 }

-vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1,
+                                           vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i16m1x7_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t vd,
+                                             const int32_t *rs1,
+                                             vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i32mf2x7_tu(vd, rs1, rs2, vl);
 }

-vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1,
+                                           vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i32m1x7_tu(vd, rs1, rs2, vl);
 }

-vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1,
+                                           vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i64m1x7_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t vd,
+                                            const uint8_t *rs1, vuint8mf8_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_u8mf8x7_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t vd,
+                                            const uint8_t *rs1, vuint8mf4_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_u8mf4x7_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t vd,
+                                            const uint8_t *rs1, vuint8mf2_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_u8mf2x7_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1,
+                                          vuint8m1_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u8m1x7_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t vd,
+                                              const uint16_t *rs1,
+                                              vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u16mf4x7_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t vd,
+                                              const uint16_t *rs1,
+                                              vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u16mf2x7_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t vd,
+                                            const uint16_t *rs1,
+                                            vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u16m1x7_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t vd,
+                                              const uint32_t *rs1,
+                                              vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u32mf2x7_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t vd,
+                                            const uint32_t *rs1,
+                                            vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u32m1x7_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t vd,
+                                            const uint64_t *rs1,
+                                            vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u64m1x7_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tum(vbool64_t vm,
+                                                vfloat16mf4x7_t vd,
+                                                const _Float16 *rs1,
+                                                vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) {
+vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tum(vbool32_t vm,
+                                                vfloat16mf2x7_t vd,
+                                                const _Float16 *rs1,
+                                                vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) {
+vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd,
+                                              const _Float16 *rs1,
+                                              vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f16m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tum(vbool64_t vm,
+                                                vfloat32mf2x7_t vd,
+                                                const float *rs1,
+                                                vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) {
+vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd,
+                                              const float *rs1, vuint8mf4_t rs2,
+                                              size_t vl) {
   return __riscv_vloxseg7ei8_v_f32m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd,
+                                              const double *rs1,
+                                              vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f64m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd,
+                                            const int8_t *rs1, vuint8mf8_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd,
+                                            const int8_t *rs1, vuint8mf4_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd,
+                                            const int8_t *rs1, vuint8mf2_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd,
+                                          const int8_t *rs1, vuint8m1_t rs2,
+                                          size_t vl) {
   return __riscv_vloxseg7ei8_v_i8m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd,
+                                              const int16_t *rs1,
+                                              vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd,
+                                              const int16_t *rs1,
+                                              vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd,
+                                            const int16_t *rs1, vuint8mf2_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_i16m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd,
+                                              const int32_t *rs1,
+                                              vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd,
+                                            const int32_t *rs1, vuint8mf4_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_i32m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd,
+                                            const int64_t *rs1, vuint8mf8_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_i64m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd,
+                                             const uint8_t *rs1,
+                                             vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd,
+                                             const uint8_t *rs1,
+                                             vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd,
+                                             const uint8_t *rs1,
+                                             vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd,
+                                           const uint8_t *rs1, vuint8m1_t rs2,
+                                           size_t vl) {
   return __riscv_vloxseg7ei8_v_u8m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd,
+                                               const uint16_t *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd,
+                                               const uint16_t *rs1,
+                                               vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd,
+                                             const uint16_t *rs1,
+                                             vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u16m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd,
+                                               const uint32_t *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd,
+                                             const uint32_t *rs1,
+                                             vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u32m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd,
+                                             const uint64_t *rs1,
+                                             vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u64m1x7_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_tumu(vbool64_t vm,
+                                                 vfloat16mf4x7_t vd,
+                                                 const _Float16 *rs1,
+                                                 vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) {
+vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_tumu(vbool32_t vm,
+                                                 vfloat16mf2x7_t vd,
+                                                 const _Float16 *rs1,
+                                                 vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) {
+vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd,
+                                               const _Float16 *rs1,
+                                               vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_tumu(vbool64_t vm,
+                                                 vfloat32mf2x7_t vd,
+                                                 const float *rs1,
+                                                 vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) {
+vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd,
+                                               const float *rs1,
+                                               vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd,
+                                               const double *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd,
+                                             const int8_t *rs1, vuint8mf8_t rs2,
+                                             size_t vl) {
   return __riscv_vloxseg7ei8_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd,
+                                             const int8_t *rs1, vuint8mf4_t rs2,
+                                             size_t vl) {
   return __riscv_vloxseg7ei8_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd,
+                                             const int8_t *rs1, vuint8mf2_t rs2,
+                                             size_t vl) {
   return __riscv_vloxseg7ei8_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd,
+                                           const int8_t *rs1, vuint8m1_t rs2,
+                                           size_t vl) {
   return __riscv_vloxseg7ei8_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd,
+                                               const int16_t *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd,
+                                               const int16_t *rs1,
+                                               vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd,
+                                             const int16_t *rs1,
+                                             vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd,
+                                               const int32_t *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd,
+                                             const int32_t *rs1,
+                                             vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd,
+                                             const int64_t *rs1,
+                                             vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd,
+                                              const uint8_t *rs1,
+                                              vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd,
+                                              const uint8_t *rs1,
+                                              vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd,
+                                              const uint8_t *rs1,
+                                              vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd,
+                                            const uint8_t *rs1, vuint8m1_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd,
+                                                const uint16_t *rs1,
+                                                vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd,
+                                                const uint16_t *rs1,
+                                                vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd,
+                                              const uint16_t *rs1,
+                                              vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd,
+                                                const uint32_t *rs1,
+                                                vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd,
+                                              const uint32_t *rs1,
+                                              vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd,
+                                              const uint64_t *rs1,
+                                              vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat16mf4x7_t test_vloxseg7ei8_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd,
+                                               const _Float16 *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) {
+vfloat16mf2x7_t test_vloxseg7ei8_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd,
+                                               const _Float16 *rs1,
+                                               vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) {
+vfloat16m1x7_t test_vloxseg7ei8_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd,
+                                             const _Float16 *rs1,
+                                             vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f16m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat32mf2x7_t test_vloxseg7ei8_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd,
+                                               const float *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) {
+vfloat32m1x7_t test_vloxseg7ei8_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd,
+                                             const float *rs1, vuint8mf4_t rs2,
+                                             size_t vl) {
   return __riscv_vloxseg7ei8_v_f32m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat64m1x7_t test_vloxseg7ei8_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd,
+                                             const double *rs1, vuint8mf8_t rs2,
+                                             size_t vl) {
   return __riscv_vloxseg7ei8_v_f64m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint8mf8x7_t test_vloxseg7ei8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd,
+                                           const int8_t *rs1, vuint8mf8_t rs2,
+                                           size_t vl) {
   return __riscv_vloxseg7ei8_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint8mf4x7_t test_vloxseg7ei8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd,
+                                           const int8_t *rs1, vuint8mf4_t rs2,
+                                           size_t vl) {
   return __riscv_vloxseg7ei8_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vint8mf2x7_t test_vloxseg7ei8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd,
+                                           const int8_t *rs1, vuint8mf2_t rs2,
+                                           size_t vl) {
   return __riscv_vloxseg7ei8_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) {
+vint8m1x7_t test_vloxseg7ei8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd,
+                                         const int8_t *rs1, vuint8m1_t rs2,
+                                         size_t vl) {
   return __riscv_vloxseg7ei8_v_i8m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint16mf4x7_t test_vloxseg7ei8_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd,
+                                             const int16_t *rs1,
+                                             vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint16mf2x7_t test_vloxseg7ei8_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd,
+                                             const int16_t *rs1,
+                                             vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vint16m1x7_t test_vloxseg7ei8_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd,
+                                           const int16_t *rs1, vuint8mf2_t rs2,
+                                           size_t vl) {
   return __riscv_vloxseg7ei8_v_i16m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint32mf2x7_t test_vloxseg7ei8_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd,
+                                             const int32_t *rs1,
+                                             vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vint32m1x7_t test_vloxseg7ei8_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd,
+                                           const int32_t *rs1, vuint8mf4_t rs2,
+                                           size_t vl) {
   return __riscv_vloxseg7ei8_v_i32m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vint64m1x7_t test_vloxseg7ei8_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd,
+                                           const int64_t *rs1, vuint8mf8_t rs2,
+                                           size_t vl) {
   return __riscv_vloxseg7ei8_v_i64m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint8mf8x7_t test_vloxseg7ei8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd,
+                                            const uint8_t *rs1, vuint8mf8_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint8mf4x7_t test_vloxseg7ei8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd,
+                                            const uint8_t *rs1, vuint8mf4_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vuint8mf2x7_t test_vloxseg7ei8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd,
+                                            const uint8_t *rs1, vuint8mf2_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg7ei8_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) {
+vuint8m1x7_t test_vloxseg7ei8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd,
+                                          const uint8_t *rs1, vuint8m1_t rs2,
+                                          size_t vl) {
   return __riscv_vloxseg7ei8_v_u8m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint16mf4x7_t test_vloxseg7ei8_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd,
+                                              const uint16_t *rs1,
+                                              vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint16mf2x7_t test_vloxseg7ei8_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd,
+                                              const uint16_t *rs1,
+                                              vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vuint16m1x7_t test_vloxseg7ei8_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd,
+                                            const uint16_t *rs1,
+                                            vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u16m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint32mf2x7_t test_vloxseg7ei8_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd,
+                                              const uint32_t *rs1,
+                                              vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint32m1x7_t test_vloxseg7ei8_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd,
+                                            const uint32_t *rs1,
+                                            vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u32m1x7_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint64m1x7_t test_vloxseg7ei8_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd,
+                                            const uint64_t *rs1,
+                                            vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg7ei8_v_u64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei16.c
index dcd7631eb..789117ac3 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei16.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei16.c
@@ -1,423 +1,635 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) {
+vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t vd,
+                                                const _Float16 *rs1,
+                                                vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_f16mf4x8_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) {
+vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t vd,
+                                                const _Float16 *rs1,
+                                                vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_f16mf2x8_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) {
+vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t vd,
+                                              const _Float16 *rs1,
+                                              vuint16m1_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_f16m1x8_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) {
+vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t vd,
+                                                const float *rs1,
+                                                vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_f32mf2x8_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) {
+vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t vd,
+                                              const float *rs1,
+                                              vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_f32m1x8_tu(vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) {
+vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t vd,
+                                              const double *rs1,
+                                              vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_f64m1x8_tu(vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1,
+                                            vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i8mf8x8_tu(vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1,
+                                            vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i8mf4x8_tu(vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1,
+                                            vuint16m1_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i8mf2x8_tu(vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1,
+                                          vuint16m2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i8m1x8_tu(vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t vd,
+                                              const int16_t *rs1,
+                                              vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i16mf4x8_tu(vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t vd,
+                                              const int16_t *rs1,
+                                              vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i16mf2x8_tu(vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1,
+                                            vuint16m1_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i16m1x8_tu(vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t vd,
+                                              const int32_t *rs1,
+                                              vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i32mf2x8_tu(vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1,
+                                            vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i32m1x8_tu(vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1,
+                                            vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i64m1x8_tu(vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t vd,
+                                             const uint8_t *rs1,
+                                             vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u8mf8x8_tu(vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t vd,
+                                             const uint8_t *rs1,
+                                             vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u8mf4x8_tu(vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t vd,
+                                             const uint8_t *rs1,
+                                             vuint16m1_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u8mf2x8_tu(vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1,
+                                           vuint16m2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u8m1x8_tu(vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t vd,
+                                               const uint16_t *rs1,
+                                               vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u16mf4x8_tu(vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t vd,
+                                               const uint16_t *rs1,
+                                               vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u16mf2x8_tu(vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t vd,
+                                             const uint16_t *rs1,
+                                             vuint16m1_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u16m1x8_tu(vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t vd,
+                                               const uint32_t *rs1,
+                                               vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u32mf2x8_tu(vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t vd,
+                                             const uint32_t *rs1,
+                                             vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u32m1x8_tu(vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t vd,
+                                             const uint64_t *rs1,
+                                             vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u64m1x8_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) {
+vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tum(vbool64_t vm,
+                                                 vfloat16mf4x8_t vd,
+                                                 const _Float16 *rs1,
+                                                 vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) {
+vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tum(vbool32_t vm,
+                                                 vfloat16mf2x8_t vd,
+                                                 const _Float16 *rs1,
+                                                 vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) {
+vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd,
+                                               const _Float16 *rs1,
+                                               vuint16m1_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_f16m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) {
+vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tum(vbool64_t vm,
+                                                 vfloat32mf2x8_t vd,
+                                                 const float *rs1,
+                                                 vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) {
+vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd,
+                                               const float *rs1,
+                                               vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_f32m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) {
+vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd,
+                                               const double *rs1,
+                                               vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_f64m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd,
+                                             const int8_t *rs1,
+                                             vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd,
+                                             const int8_t *rs1,
+                                             vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) {
+vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd,
+                                             const int8_t *rs1, vuint16m1_t rs2,
+                                             size_t vl) {
   return __riscv_vloxseg8ei16_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) {
+vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd,
+                                           const int8_t *rs1, vuint16m2_t rs2,
+                                           size_t vl) {
   return __riscv_vloxseg8ei16_v_i8m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd,
+                                               const int16_t *rs1,
+                                               vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd,
+                                               const int16_t *rs1,
+                                               vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) {
+vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd,
+                                             const int16_t *rs1,
+                                             vuint16m1_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i16m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd,
+                                               const int32_t *rs1,
+                                               vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) {
+vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd,
+                                             const int32_t *rs1,
+                                             vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i32m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd,
+                                             const int64_t *rs1,
+                                             vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_i64m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd,
+                                              const uint8_t *rs1,
+                                              vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) {
+vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd,
+                                              const uint8_t *rs1,
+                                              vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) {
+vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd,
+                                              const uint8_t *rs1,
+                                              vuint16m1_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) {
+vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd,
+                                            const uint8_t *rs1, vuint16m2_t rs2,
+                                            size_t vl) {
   return __riscv_vloxseg8ei16_v_u8m1x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd,
+                                                const uint16_t *rs1,
+                                                vuint16mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) {
+vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd,
+                                                const uint16_t *rs1,
+                                                vuint16mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei16_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) {
+vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd,
+                                              const uint16_t *rs1,
+                                              vuint16m1_t rs2, size_t vl) {
return __riscv_vloxseg8ei16_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_tumu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_tumu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_tumu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, 
vuint16mf2_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg8ei16_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } 
-vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_tumu(vbool64_t vm, + vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_tumu(vbool32_t vm, + vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_tumu(vbool64_t vm, + vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x8_t test_vloxseg8ei16_v_f16mf4x8_mu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei16_v_f16mf2x8_mu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x8_t 
test_vloxseg8ei16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei16_v_f32mf2x8_mu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x8_t test_vloxseg8ei16_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei16_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei16_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vloxseg8ei16_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei16_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vloxseg8ei16_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei16_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei16_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei16_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg8ei16_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei16_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t 
rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei16_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei16_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei16_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei16_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei16_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei16_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei16_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vloxseg8ei16_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei16_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t 
test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei16_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei16_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei16_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei32.c index 39af07120..3f65b9870 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei32.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x8_t
test_vloxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t vd, const 
uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tum(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tum(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tum(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return 
__riscv_vloxseg8ei32_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, 
+ vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i64m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_u8m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t 
vl) { +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_tumu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_tumu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_tumu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } 
-vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_tumu(vbool64_t vm, + vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_tumu(vbool32_t vm, + vuint16mf2x8_t vd, + const 
uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_tumu(vbool64_t vm, + vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x8_t test_vloxseg8ei32_v_f16mf4x8_mu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei32_v_f16mf2x8_mu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x8_t test_vloxseg8ei32_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei32_v_f32mf2x8_mu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x8_t test_vloxseg8ei32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei32_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, 
const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei32_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei32_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei32_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei32_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei32_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei32_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei32_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei32_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei32_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t vm, 
vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei32_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei32_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei32_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vloxseg8ei32_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei32_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei32_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei32_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei32_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei32_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei64.c index e7f6030e1..86ff4b431 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei64.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s 
#include <riscv_vector.h> -vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i16mf2x8_tu(vd, rs1, rs2, vl); }
-vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t 
test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tum(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tum(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tum(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_i8mf2x8_tum(vm, vd, rs1, 
rs2, vl); } -vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i64m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return 
__riscv_vloxseg8ei64_v_u8m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_tumu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_tumu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_tumu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint64m2_t 
rs2, size_t vl) { +vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t 
test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_tumu(vbool64_t vm, + vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_tumu(vbool32_t vm, + vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_tumu(vbool64_t vm, + vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, + const 
uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x8_t test_vloxseg8ei64_v_f16mf4x8_mu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei64_v_f16mf2x8_mu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x8_t test_vloxseg8ei64_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei64_v_f32mf2x8_mu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x8_t test_vloxseg8ei64_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei64_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei64_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei64_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei64_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x8_t 
test_vloxseg8ei64_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei64_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei64_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei64_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei64_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei64_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei64_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei64_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei64_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vloxseg8ei64_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei64_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vloxseg8ei64_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t 
rs2, size_t vl) {
+vuint16mf2x8_t test_vloxseg8ei64_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd,
+                                               const uint16_t *rs1,
+                                               vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei64_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) {
+vuint16m1x8_t test_vloxseg8ei64_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd,
+                                             const uint16_t *rs1,
+                                             vuint64m4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei64_v_u16m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) {
+vuint32mf2x8_t test_vloxseg8ei64_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd,
+                                               const uint32_t *rs1,
+                                               vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg8ei64_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) {
+vuint32m1x8_t test_vloxseg8ei64_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd,
+                                             const uint32_t *rs1,
+                                             vuint64m2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei64_v_u32m1x8_mu(vm, vd, rs1, rs2, vl);
 }

-vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) {
+vuint64m1x8_t test_vloxseg8ei64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd,
+                                             const uint64_t *rs1,
+                                             vuint64m1_t rs2, size_t vl) {
   return __riscv_vloxseg8ei64_v_u64m1x8_mu(vm, vd, rs1, rs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei8.c
index ec6413c3c..231e3055d 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei8.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vloxseg8ei8.c
@@ -1,423 +1,629 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t vd,
+                                               const _Float16 *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg8ei8_v_f16mf4x8_tu(vd, rs1, rs2, vl);
 }

-vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) {
+vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t vd,
+                                               const _Float16 *rs1,
+                                               vuint8mf4_t rs2, size_t vl) {
   return __riscv_vloxseg8ei8_v_f16mf2x8_tu(vd, rs1, rs2, vl);
 }

-vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) {
+vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t vd,
+                                             const _Float16 *rs1,
+                                             vuint8mf2_t rs2, size_t vl) {
   return __riscv_vloxseg8ei8_v_f16m1x8_tu(vd, rs1, rs2, vl);
 }

-vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) {
+vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t vd,
+                                               const float *rs1,
+                                               vuint8mf8_t rs2, size_t vl) {
   return __riscv_vloxseg8ei8_v_f32mf2x8_tu(vd, rs1, rs2, vl);
 }

-vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) {
+vfloat32m1x8_t
test_vloxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return 
__riscv_vloxseg8ei8_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tum(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tum(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x8_t 
test_vloxseg8ei8_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tum(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { 
+vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i64m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_u8m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const 
uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_tumu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_tumu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_tumu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i8mf2x8_tumu(vm, vd, rs1, rs2, 
vl); } -vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return 
__riscv_vloxseg8ei8_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x8_t test_vloxseg8ei8_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x8_t test_vloxseg8ei8_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x8_t test_vloxseg8ei8_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x8_t test_vloxseg8ei8_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x8_t 
test_vloxseg8ei8_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x8_t test_vloxseg8ei8_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x8_t test_vloxseg8ei8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x8_t test_vloxseg8ei8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x8_t test_vloxseg8ei8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x8_t test_vloxseg8ei8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x8_t test_vloxseg8ei8_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x8_t test_vloxseg8ei8_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x8_t test_vloxseg8ei8_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x8_t test_vloxseg8ei8_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x8_t test_vloxseg8ei8_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x8_t 
test_vloxseg8ei8_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x8_t test_vloxseg8ei8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x8_t test_vloxseg8ei8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x8_t test_vloxseg8ei8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x8_t test_vloxseg8ei8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vloxseg8ei8_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x8_t test_vloxseg8ei8_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x8_t test_vloxseg8ei8_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x8_t test_vloxseg8ei8_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x8_t test_vloxseg8ei8_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x8_t test_vloxseg8ei8_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x8_t test_vloxseg8ei8_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vloxseg8ei8_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlse16.c b/auto-generated/policy_funcs/llvm-api-tests/vlse16.c 
index fe849b9d8..8901df015 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlse16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlse16.c @@ -1,295 +1,421 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vlse16_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4_t test_vlse16_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vlse16_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2_t test_vlse16_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vlse16_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1_t test_vlse16_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vlse16_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2_t test_vlse16_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vlse16_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m4_t test_vlse16_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_f16m4_tu(vd, rs1, rs2, vl); } -vfloat16m8_t test_vlse16_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m8_t test_vlse16_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_f16m8_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vlse16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4_t test_vlse16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2_t test_vlse16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1_t test_vlse16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vlse16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2_t test_vlse16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_i16m2_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m4_t test_vlse16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_i16m4_tu(vd, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m8_t test_vlse16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, + ptrdiff_t
rs2, size_t vl) { return __riscv_vlse16_v_i16m8_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vlse16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4_t test_vlse16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2_t test_vlse16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1_t test_vlse16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2_t test_vlse16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m4_t test_vlse16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_u16m4_tu(vd, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m8_t test_vlse16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse16_v_u16m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vlse16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4_t test_vlse16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vlse16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2_t test_vlse16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vlse16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1_t test_vlse16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vlse16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2_t test_vlse16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vlse16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m4_t test_vlse16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16m4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vlse16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m8_t test_vlse16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16m8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vlse16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { 
+vint16mf4_t test_vlse16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2_t test_vlse16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1_t test_vlse16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vlse16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2_t test_vlse16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m4_t test_vlse16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16m4_tum(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m8_t test_vlse16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16m8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vlse16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4_t test_vlse16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2_t test_vlse16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1_t test_vlse16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2_t test_vlse16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m4_t test_vlse16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16m4_tum(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m8_t test_vlse16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vlse16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4_t 
test_vlse16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vlse16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2_t test_vlse16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vlse16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1_t test_vlse16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vlse16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2_t test_vlse16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vlse16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m4_t test_vlse16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vlse16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m8_t test_vlse16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16m8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vlse16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4_t test_vlse16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2_t test_vlse16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1_t test_vlse16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vlse16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2_t test_vlse16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m4_t test_vlse16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m8_t test_vlse16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16m8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vlse16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, ptrdiff_t rs2, 
size_t vl) { +vuint16mf4_t test_vlse16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2_t test_vlse16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1_t test_vlse16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2_t test_vlse16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m4_t test_vlse16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m8_t test_vlse16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vlse16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4_t test_vlse16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vlse16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2_t test_vlse16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vlse16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1_t test_vlse16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vlse16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2_t test_vlse16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vlse16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m4_t test_vlse16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vlse16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m8_t test_vlse16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_f16m8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vlse16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t 
*rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4_t test_vlse16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vlse16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2_t test_vlse16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vlse16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1_t test_vlse16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vlse16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2_t test_vlse16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vlse16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m4_t test_vlse16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16m4_mu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vlse16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m8_t test_vlse16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_i16m8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vlse16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4_t test_vlse16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vlse16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2_t test_vlse16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vlse16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1_t test_vlse16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vlse16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2_t test_vlse16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vlse16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m4_t test_vlse16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16m4_mu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vlse16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m8_t test_vlse16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse16_v_u16m8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlse32.c b/auto-generated/policy_funcs/llvm-api-tests/vlse32.c index 89a0360f8..cf9f02327 
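/* Editorial sketch (not part of the generated diff): vlse16 is a strided
 * load; rs1 is the base address and rs2 the stride in bytes between
 * consecutive elements. Under the _tu (tail-undisturbed) policy, elements
 * past vl keep the values passed in through vd. The helper name and the
 * row_bytes parameter below are illustrative assumptions. */
#include <riscv_vector.h>
#include <stddef.h>

// Load one f16 element from each matrix row into a vector; tail elements
// (indices >= vl) are left exactly as they were in vd.
static vfloat16m1_t load_f16_column_tu(vfloat16m1_t vd, const _Float16 *col0,
                                       ptrdiff_t row_bytes, size_t rows) {
  size_t vl = __riscv_vsetvl_e16m1(rows);
  return __riscv_vlse16_v_f16m1_tu(vd, col0, row_bytes, vl);
}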
100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlse32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlse32.c @@ -1,247 +1,352 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2_t test_vlse32_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2_t test_vlse32_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1_t test_vlse32_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2_t test_vlse32_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m4_t test_vlse32_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m8_t test_vlse32_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_f32m8_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2_t test_vlse32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1_t test_vlse32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2_t test_vlse32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m4_t test_vlse32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_i32m4_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m8_t test_vlse32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_i32m8_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2_t test_vlse32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1_t test_vlse32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_u32m1_tu(vd, rs1,
rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2_t test_vlse32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m4_t test_vlse32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m8_t test_vlse32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse32_v_u32m8_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vlse32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2_t test_vlse32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1_t test_vlse32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2_t test_vlse32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m4_t test_vlse32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m8_t test_vlse32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32m8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2_t test_vlse32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1_t test_vlse32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2_t test_vlse32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m4_t test_vlse32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m8_t 
test_vlse32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32m8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2_t test_vlse32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1_t test_vlse32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2_t test_vlse32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m4_t test_vlse32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m8_t test_vlse32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32m8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vlse32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2_t test_vlse32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1_t test_vlse32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2_t test_vlse32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m4_t test_vlse32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m8_t test_vlse32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2_t test_vlse32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1_t 
test_vlse32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2_t test_vlse32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m4_t test_vlse32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m8_t test_vlse32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2_t test_vlse32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1_t test_vlse32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2_t test_vlse32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m4_t test_vlse32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m8_t test_vlse32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vlse32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2_t test_vlse32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vlse32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1_t test_vlse32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vlse32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2_t test_vlse32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vlse32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m4_t 
test_vlse32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vlse32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m8_t test_vlse32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_f32m8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vlse32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2_t test_vlse32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vlse32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1_t test_vlse32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vlse32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2_t test_vlse32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vlse32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m4_t test_vlse32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vlse32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m8_t test_vlse32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vlse32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2_t test_vlse32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vlse32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1_t test_vlse32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vlse32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2_t test_vlse32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vlse32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m4_t test_vlse32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vlse32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m8_t test_vlse32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse32_v_u32m8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlse64.c b/auto-generated/policy_funcs/llvm-api-tests/vlse64.c index eb0089c38..bd16581ff 100644 --- 
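/* Editorial sketch (not part of the generated diff): the suffixes in these
 * policy tests decode as: _tu = tail undisturbed; _tum = tail undisturbed,
 * mask agnostic; _tumu = tail undisturbed, mask undisturbed; _mu = tail
 * agnostic, mask undisturbed. With _tumu, both masked-off and tail elements
 * keep vd's contents. The helper below is an illustrative assumption. */
#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

// Strided load that only overwrites the active elements; everything else
// (masked-off and tail positions) stays as it was in vd.
static vint32m1_t strided_merge_tumu(vbool32_t vm, vint32m1_t vd,
                                     const int32_t *src, ptrdiff_t stride,
                                     size_t n) {
  size_t vl = __riscv_vsetvl_e32m1(n);
  return __riscv_vlse32_v_i32m1_tumu(vm, vd, src, stride, vl);
}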
a/auto-generated/policy_funcs/llvm-api-tests/vlse64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlse64.c @@ -1,199 +1,283 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1_t test_vlse64_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1_t test_vlse64_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse64_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m2_t test_vlse64_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse64_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m4_t test_vlse64_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse64_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m8_t test_vlse64_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse64_v_f64m8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1_t test_vlse64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse64_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m2_t test_vlse64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse64_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m4_t test_vlse64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse64_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m8_t test_vlse64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse64_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1_t test_vlse64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse64_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vlse64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m2_t test_vlse64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse64_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vlse64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m4_t test_vlse64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse64_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vlse64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m8_t test_vlse64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse64_v_u64m8_tu(vd, rs1, rs2, vl); }
-vfloat64m1_t test_vlse64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1_t test_vlse64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m2_t test_vlse64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m4_t test_vlse64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m8_t test_vlse64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1_t test_vlse64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m2_t test_vlse64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m4_t test_vlse64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m8_t test_vlse64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1_t test_vlse64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vlse64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m2_t test_vlse64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vlse64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m4_t test_vlse64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vlse64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m8_t test_vlse64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t 
test_vlse64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1_t test_vlse64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m2_t test_vlse64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m4_t test_vlse64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m8_t test_vlse64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1_t test_vlse64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m2_t test_vlse64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m4_t test_vlse64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m8_t test_vlse64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1_t test_vlse64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vlse64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m2_t test_vlse64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vlse64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m4_t test_vlse64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vlse64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m8_t test_vlse64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t 
test_vlse64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1_t test_vlse64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vlse64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m2_t test_vlse64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vlse64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m4_t test_vlse64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vlse64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m8_t test_vlse64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vlse64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1_t test_vlse64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vlse64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m2_t test_vlse64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vlse64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m4_t test_vlse64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vlse64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m8_t test_vlse64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vlse64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1_t test_vlse64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vlse64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m2_t test_vlse64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vlse64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m4_t test_vlse64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_u64m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vlse64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m8_t test_vlse64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse64_v_u64m8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlse8.c 
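/* Editorial sketch (not part of the generated diff): _mu is the one policy
 * combination where the tail is agnostic but masked-off elements are
 * undisturbed, so it behaves like a per-element merge into vd. The helper
 * name below is an illustrative assumption. */
#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

// Refresh only the active elements of a running state vector from a strided
// source; inactive elements keep vd, tail elements may take any value.
static vint8m1_t refresh_active_mu(vbool8_t vm, vint8m1_t vd,
                                   const int8_t *src, ptrdiff_t stride,
                                   size_t n) {
  size_t vl = __riscv_vsetvl_e8m1(n);
  return __riscv_vlse8_v_i8m1_mu(vm, vd, src, stride, vl);
}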
b/auto-generated/policy_funcs/llvm-api-tests/vlse8.c index 87f8dbe1e..3c5c98d3b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlse8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlse8.c @@ -5,226 +5,298 @@ #include <riscv_vector.h> -vint8mf8_t test_vlse8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8_t test_vlse8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vlse8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4_t test_vlse8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vlse8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2_t test_vlse8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vlse8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1_t test_vlse8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_i8m1_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vlse8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2_t test_vlse8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_i8m2_tu(vd, rs1, rs2, vl); } -vint8m4_t test_vlse8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m4_t test_vlse8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_i8m4_tu(vd, rs1, rs2, vl); } -vint8m8_t test_vlse8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m8_t test_vlse8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_i8m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vlse8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8_t test_vlse8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vlse8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4_t test_vlse8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vlse8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2_t test_vlse8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vlse8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1_t test_vlse8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vlse8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2_t test_vlse8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8m2_tu(vd, rs1, rs2, vl); } -vuint8m4_t test_vlse8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m4_t test_vlse8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8m4_tu(vd, rs1, rs2, vl); } -vuint8m8_t test_vlse8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m8_t
test_vlse8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vlse8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8_t test_vlse8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vlse8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4_t test_vlse8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vlse8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2_t test_vlse8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vlse8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1_t test_vlse8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vlse8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2_t test_vlse8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8m2_tum(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vlse8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m4_t test_vlse8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8m4_tum(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vlse8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m8_t test_vlse8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vlse8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8_t test_vlse8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vlse8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4_t test_vlse8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vlse8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2_t test_vlse8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vlse8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1_t test_vlse8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vlse8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2_t test_vlse8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8m2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vlse8_v_u8m4_tum(vbool2_t vm, 
vuint8m4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m4_t test_vlse8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8m4_tum(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vlse8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m8_t test_vlse8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vlse8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8_t test_vlse8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vlse8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4_t test_vlse8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vlse8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2_t test_vlse8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vlse8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1_t test_vlse8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vlse8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2_t test_vlse8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vlse8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m4_t test_vlse8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8m4_tumu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vlse8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m8_t test_vlse8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vlse8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8_t test_vlse8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vlse8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4_t test_vlse8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vlse8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2_t test_vlse8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vlse8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1_t test_vlse8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, 
ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vlse8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2_t test_vlse8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vlse8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m4_t test_vlse8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8m4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vlse8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m8_t test_vlse8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vlse8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8_t test_vlse8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vlse8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4_t test_vlse8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vlse8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2_t test_vlse8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vlse8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1_t test_vlse8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vlse8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2_t test_vlse8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vlse8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m4_t test_vlse8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8m4_mu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vlse8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m8_t test_vlse8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_i8m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vlse8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8_t test_vlse8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vlse8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4_t test_vlse8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vlse8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { 
+vuint8mf2_t test_vlse8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlse8_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vlse8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1_t test_vlse8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vlse8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2_t test_vlse8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vlse8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m4_t test_vlse8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8m4_mu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vlse8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m8_t test_vlse8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlse8_v_u8m8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e16.c index 7f800f965..7194f8fbc 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e16.c @@ -1,247 +1,307 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16mf4x2_tu(vd, rs1, vl); } -vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16mf2x2_tu(vd, rs1, vl); } -vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tu(vfloat16m1x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tu(vfloat16m1x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16m1x2_tu(vd, rs1, vl); } -vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tu(vfloat16m2x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tu(vfloat16m2x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16m2x2_tu(vd, rs1, vl); } -vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tu(vfloat16m4x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tu(vfloat16m4x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16m4x2_tu(vd, rs1, vl); } -vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg2e16_v_i16mf4x2_tu(vd, rs1, vl); } -vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x2_t
test_vlseg2e16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg2e16_v_i16mf2x2_tu(vd, rs1, vl); } -vint16m1x2_t test_vlseg2e16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, size_t vl) { +vint16m1x2_t test_vlseg2e16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg2e16_v_i16m1x2_tu(vd, rs1, vl); } -vint16m2x2_t test_vlseg2e16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, size_t vl) { +vint16m2x2_t test_vlseg2e16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg2e16_v_i16m2x2_tu(vd, rs1, vl); } -vint16m4x2_t test_vlseg2e16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, size_t vl) { +vint16m4x2_t test_vlseg2e16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg2e16_v_i16m4x2_tu(vd, rs1, vl); } -vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16mf4x2_tu(vd, rs1, vl); } -vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16mf2x2_tu(vd, rs1, vl); } -vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vlseg2e16_v_u16m1x2_tu(vd, rs1, vl); } -vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vlseg2e16_v_u16m2x2_tu(vd, rs1, vl); } -vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vlseg2e16_v_u16m4x2_tu(vd, rs1, vl); } -vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16mf4x2_tum(vm, vd, rs1, vl); } -vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16mf2x2_tum(vm, vd, rs1, vl); } -vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16m1x2_tum(vm, vd, rs1, vl); } -vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16m2x2_tum(vm, vd, rs1, vl); } -vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16m4x2_tum(vm, vd, rs1, vl); } -vint16mf4x2_t 
test_vlseg2e16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16mf4x2_tum(vm, vd, rs1, vl); } -vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16mf2x2_tum(vm, vd, rs1, vl); } -vint16m1x2_t test_vlseg2e16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t vl) { +vint16m1x2_t test_vlseg2e16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16m1x2_tum(vm, vd, rs1, vl); } -vint16m2x2_t test_vlseg2e16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t vl) { +vint16m2x2_t test_vlseg2e16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16m2x2_tum(vm, vd, rs1, vl); } -vint16m4x2_t test_vlseg2e16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t vl) { +vint16m4x2_t test_vlseg2e16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16m4x2_tum(vm, vd, rs1, vl); } -vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16mf4x2_tum(vm, vd, rs1, vl); } -vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16mf2x2_tum(vm, vd, rs1, vl); } -vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16m1x2_tum(vm, vd, rs1, vl); } -vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16m2x2_tum(vm, vd, rs1, vl); } -vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16m4x2_tum(vm, vd, rs1, vl); } -vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16mf4x2_tumu(vm, vd, rs1, vl); } -vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16mf2x2_tumu(vm, vd, rs1, vl); } -vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_tumu(vbool16_t vm, 
vfloat16m1x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16m1x2_tumu(vm, vd, rs1, vl); } -vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16m2x2_tumu(vm, vd, rs1, vl); } -vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16m4x2_tumu(vm, vd, rs1, vl); } -vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16mf4x2_tumu(vm, vd, rs1, vl); } -vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16mf2x2_tumu(vm, vd, rs1, vl); } -vint16m1x2_t test_vlseg2e16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t vl) { +vint16m1x2_t test_vlseg2e16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16m1x2_tumu(vm, vd, rs1, vl); } -vint16m2x2_t test_vlseg2e16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t vl) { +vint16m2x2_t test_vlseg2e16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16m2x2_tumu(vm, vd, rs1, vl); } -vint16m4x2_t test_vlseg2e16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t vl) { +vint16m4x2_t test_vlseg2e16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16m4x2_tumu(vm, vd, rs1, vl); } -vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16mf4x2_tumu(vm, vd, rs1, vl); } -vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16mf2x2_tumu(vm, vd, rs1, vl); } -vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x2_t test_vlseg2e16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16m1x2_tumu(vm, vd, rs1, vl); } -vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2x2_t test_vlseg2e16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16m2x2_tumu(vm, vd, rs1, vl); } -vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m4x2_t test_vlseg2e16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16m4x2_tumu(vm, vd, rs1, vl); } -vfloat16mf4x2_t 
test_vlseg2e16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x2_t test_vlseg2e16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16mf4x2_mu(vm, vd, rs1, vl); } -vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x2_t test_vlseg2e16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16mf2x2_mu(vm, vd, rs1, vl); } -vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x2_t test_vlseg2e16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16m1x2_mu(vm, vd, rs1, vl); } -vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2x2_t test_vlseg2e16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16m2x2_mu(vm, vd, rs1, vl); } -vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m4x2_t test_vlseg2e16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg2e16_v_f16m4x2_mu(vm, vd, rs1, vl); } -vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x2_t test_vlseg2e16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16mf4x2_mu(vm, vd, rs1, vl); } -vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x2_t test_vlseg2e16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16mf2x2_mu(vm, vd, rs1, vl); } -vint16m1x2_t test_vlseg2e16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t vl) { +vint16m1x2_t test_vlseg2e16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16m1x2_mu(vm, vd, rs1, vl); } -vint16m2x2_t test_vlseg2e16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t vl) { +vint16m2x2_t test_vlseg2e16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16m2x2_mu(vm, vd, rs1, vl); } -vint16m4x2_t test_vlseg2e16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t vl) { +vint16m4x2_t test_vlseg2e16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_i16m4x2_mu(vm, vd, rs1, vl); } -vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x2_t test_vlseg2e16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16mf4x2_mu(vm, vd, rs1, vl); } -vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x2_t test_vlseg2e16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16mf2x2_mu(vm, vd, rs1, vl); } -vuint16m1x2_t test_vlseg2e16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x2_t test_vlseg2e16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, 
size_t vl) { return __riscv_vlseg2e16_v_u16m1x2_mu(vm, vd, rs1, vl); } -vuint16m2x2_t test_vlseg2e16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2x2_t test_vlseg2e16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16m2x2_mu(vm, vd, rs1, vl); } -vuint16m4x2_t test_vlseg2e16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t vl) { +vuint16m4x2_t test_vlseg2e16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg2e16_v_u16m4x2_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e16ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e16ff.c index 17faf1c10..3800f34da 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e16ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e16ff.c @@ -1,247 +1,368 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tu(vfloat16mf4x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16mf4x2_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tu(vfloat16mf2x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16mf2x2_tu(vd, rs1, new_vl, vl); } -vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tu(vfloat16m1x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tu(vfloat16m1x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16m1x2_tu(vd, rs1, new_vl, vl); } -vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tu(vfloat16m2x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tu(vfloat16m2x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16m2x2_tu(vd, rs1, new_vl, vl); } -vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tu(vfloat16m4x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tu(vfloat16m4x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16m4x2_tu(vd, rs1, new_vl, vl); } -vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tu(vint16mf4x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16mf4x2_tu(vd, rs1, new_vl, vl); } -vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tu(vint16mf2x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16mf2x2_tu(vd, rs1, new_vl, vl); } -vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tu(vint16m1x2_t vd, const
int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_i16m1x2_tu(vd, rs1, new_vl, vl); } -vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_i16m2x2_tu(vd, rs1, new_vl, vl); } -vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_i16m4x2_tu(vd, rs1, new_vl, vl); } -vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tu(vuint16mf4x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16mf4x2_tu(vd, rs1, new_vl, vl); } -vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tu(vuint16mf2x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16mf2x2_tu(vd, rs1, new_vl, vl); } -vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tu(vuint16m1x2_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_u16m1x2_tu(vd, rs1, new_vl, vl); } -vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tu(vuint16m2x2_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_u16m2x2_tu(vd, rs1, new_vl, vl); } -vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tu(vuint16m4x2_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_u16m4x2_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tum(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16mf4x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tum(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16m1x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16m2x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m4x2_t 
test_vlseg2e16ff_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16m4x2_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_i16mf4x2_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_i16mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16m1x2_tum(vm, vd, rs1, new_vl, vl); } -vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16m2x2_tum(vm, vd, rs1, new_vl, vl); } -vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16m4x2_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16mf4x2_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16m1x2_tum(vm, vd, rs1, new_vl, vl); } -vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16m2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return 
__riscv_vlseg2e16ff_v_u16m4x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_tumu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16mf4x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_tumu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_i16mf4x2_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_i16mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x2_t 
test_vlseg2e16ff_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16mf4x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x2_t test_vlseg2e16ff_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16mf4x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x2_t test_vlseg2e16ff_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x2_t test_vlseg2e16ff_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16m1x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m2x2_t test_vlseg2e16ff_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16m2x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m4x2_t test_vlseg2e16ff_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_f16m4x2_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x2_t test_vlseg2e16ff_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16mf4x2_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x2_t 
test_vlseg2e16ff_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x2_t test_vlseg2e16ff_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x2_t test_vlseg2e16ff_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16m1x2_mu(vm, vd, rs1, new_vl, vl); } -vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2x2_t test_vlseg2e16ff_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16m2x2_mu(vm, vd, rs1, new_vl, vl); } -vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m4x2_t test_vlseg2e16ff_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_i16m4x2_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x2_t test_vlseg2e16ff_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16mf4x2_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x2_t test_vlseg2e16ff_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e16ff_v_u16mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x2_t test_vlseg2e16ff_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_u16m1x2_mu(vm, vd, rs1, new_vl, vl); } -vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2x2_t test_vlseg2e16ff_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_u16m2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m4x2_t test_vlseg2e16ff_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e16ff_v_u16m4x2_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e32.c index cc56bd010..f2dd37aba 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e32.c @@ -1,199 +1,247 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include 
<riscv_vector.h>
-vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float *rs1, size_t vl) { +vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg2e32_v_f32mf2x2_tu(vd, rs1, vl); } -vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, size_t vl) { +vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, + size_t vl) { return __riscv_vlseg2e32_v_f32m1x2_tu(vd, rs1, vl); } -vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float *rs1, size_t vl) { +vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float *rs1, + size_t vl) { return __riscv_vlseg2e32_v_f32m2x2_tu(vd, rs1, vl); } -vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, size_t vl) { +vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, + size_t vl) { return __riscv_vlseg2e32_v_f32m4x2_tu(vd, rs1, vl); } -vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg2e32_v_i32mf2x2_tu(vd, rs1, vl); } -vint32m1x2_t test_vlseg2e32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, size_t vl) { +vint32m1x2_t test_vlseg2e32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg2e32_v_i32m1x2_tu(vd, rs1, vl); } -vint32m2x2_t test_vlseg2e32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, size_t vl) { +vint32m2x2_t test_vlseg2e32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg2e32_v_i32m2x2_tu(vd, rs1, vl); } -vint32m4x2_t test_vlseg2e32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, size_t vl) { +vint32m4x2_t test_vlseg2e32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg2e32_v_i32m4x2_tu(vd, rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32mf2x2_tu(vd, rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, + size_t vl) { return __riscv_vlseg2e32_v_u32m1x2_tu(vd, rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, + size_t vl) { return __riscv_vlseg2e32_v_u32m2x2_tu(vd, rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, + size_t vl) { return __riscv_vlseg2e32_v_u32m4x2_tu(vd, rs1, vl); } -vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, size_t vl) { +vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg2e32_v_f32mf2x2_tum(vm, vd, rs1, vl); } -vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, size_t vl) { +vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg2e32_v_f32m1x2_tum(vm, vd, rs1, vl); } -vfloat32m2x2_t 
test_vlseg2e32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, size_t vl) { +vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg2e32_v_f32m2x2_tum(vm, vd, rs1, vl); } -vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, size_t vl) { +vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg2e32_v_f32m4x2_tum(vm, vd, rs1, vl); } -vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_i32mf2x2_tum(vm, vd, rs1, vl); } -vint32m1x2_t test_vlseg2e32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t vl) { +vint32m1x2_t test_vlseg2e32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_i32m1x2_tum(vm, vd, rs1, vl); } -vint32m2x2_t test_vlseg2e32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t vl) { +vint32m2x2_t test_vlseg2e32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_i32m2x2_tum(vm, vd, rs1, vl); } -vint32m4x2_t test_vlseg2e32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t vl) { +vint32m4x2_t test_vlseg2e32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_i32m4x2_tum(vm, vd, rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32mf2x2_tum(vm, vd, rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32m1x2_tum(vm, vd, rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32m2x2_tum(vm, vd, rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32m4x2_tum(vm, vd, rs1, vl); } -vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, size_t vl) { +vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg2e32_v_f32mf2x2_tumu(vm, vd, rs1, vl); } -vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, size_t vl) { +vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg2e32_v_f32m1x2_tumu(vm, vd, rs1, vl); } -vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, size_t vl) { +vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, 
size_t vl) { return __riscv_vlseg2e32_v_f32m2x2_tumu(vm, vd, rs1, vl); } -vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, size_t vl) { +vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg2e32_v_f32m4x2_tumu(vm, vd, rs1, vl); } -vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_i32mf2x2_tumu(vm, vd, rs1, vl); } -vint32m1x2_t test_vlseg2e32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t vl) { +vint32m1x2_t test_vlseg2e32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_i32m1x2_tumu(vm, vd, rs1, vl); } -vint32m2x2_t test_vlseg2e32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t vl) { +vint32m2x2_t test_vlseg2e32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_i32m2x2_tumu(vm, vd, rs1, vl); } -vint32m4x2_t test_vlseg2e32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t vl) { +vint32m4x2_t test_vlseg2e32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_i32m4x2_tumu(vm, vd, rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32mf2x2_tumu(vm, vd, rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x2_t test_vlseg2e32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32m1x2_tumu(vm, vd, rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32m2x2_t test_vlseg2e32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32m2x2_tumu(vm, vd, rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32m4x2_t test_vlseg2e32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32m4x2_tumu(vm, vd, rs1, vl); } -vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, size_t vl) { +vfloat32mf2x2_t test_vlseg2e32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg2e32_v_f32mf2x2_mu(vm, vd, rs1, vl); } -vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, size_t vl) { +vfloat32m1x2_t test_vlseg2e32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg2e32_v_f32m1x2_mu(vm, vd, rs1, vl); } -vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, size_t vl) { +vfloat32m2x2_t test_vlseg2e32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg2e32_v_f32m2x2_mu(vm, vd, rs1, vl); } -vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, size_t vl) { 
+vfloat32m4x2_t test_vlseg2e32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg2e32_v_f32m4x2_mu(vm, vd, rs1, vl); } -vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x2_t test_vlseg2e32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_i32mf2x2_mu(vm, vd, rs1, vl); } -vint32m1x2_t test_vlseg2e32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t vl) { +vint32m1x2_t test_vlseg2e32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_i32m1x2_mu(vm, vd, rs1, vl); } -vint32m2x2_t test_vlseg2e32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t vl) { +vint32m2x2_t test_vlseg2e32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_i32m2x2_mu(vm, vd, rs1, vl); } -vint32m4x2_t test_vlseg2e32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t vl) { +vint32m4x2_t test_vlseg2e32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_i32m4x2_mu(vm, vd, rs1, vl); } -vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x2_t test_vlseg2e32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32mf2x2_mu(vm, vd, rs1, vl); } -vuint32m1x2_t test_vlseg2e32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x2_t test_vlseg2e32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32m1x2_mu(vm, vd, rs1, vl); } -vuint32m2x2_t test_vlseg2e32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32m2x2_t test_vlseg2e32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32m2x2_mu(vm, vd, rs1, vl); } -vuint32m4x2_t test_vlseg2e32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t vl) { +vuint32m4x2_t test_vlseg2e32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg2e32_v_u32m4x2_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e32ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e32ff.c index 1b7495dfc..3abcf741e 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e32ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e32ff.c @@ -1,199 +1,294 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tu(vfloat32mf2x2_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32mf2x2_tu(vd, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tu(vfloat32m1x2_t vd, + const float *rs1, size_t *new_vl, + size_t
vl) { return __riscv_vlseg2e32ff_v_f32m1x2_tu(vd, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tu(vfloat32m2x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tu(vfloat32m2x2_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32m2x2_tu(vd, rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tu(vfloat32m4x2_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32m4x2_tu(vd, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tu(vint32mf2x2_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_i32mf2x2_tu(vd, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_i32m1x2_tu(vd, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_i32m2x2_tu(vd, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_i32m4x2_tu(vd, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tu(vuint32mf2x2_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_u32mf2x2_tu(vd, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tu(vuint32m1x2_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_u32m1x2_tu(vd, rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tu(vuint32m2x2_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_u32m2x2_tu(vd, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tu(vuint32m4x2_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_u32m4x2_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tum(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_f32mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, 
size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32m1x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32m2x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32m4x2_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_i32mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_i32m1x2_tum(vm, vd, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_i32m2x2_tum(vm, vd, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_i32m4x2_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_u32mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_u32m1x2_tum(vm, vd, rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_u32m2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_u32m4x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x2_t 
test_vlseg2e32ff_v_f32mf2x2_tumu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_f32mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_i32mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_i32m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_i32m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_i32m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_u32mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_u32m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_u32m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tumu(vbool8_t vm, 
vuint32m4x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_u32m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x2_t test_vlseg2e32ff_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x2_t test_vlseg2e32ff_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32m1x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m2x2_t test_vlseg2e32ff_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32m2x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m4x2_t test_vlseg2e32ff_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_f32m4x2_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x2_t test_vlseg2e32ff_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_i32mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x2_t test_vlseg2e32ff_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_i32m1x2_mu(vm, vd, rs1, new_vl, vl); } -vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m2x2_t test_vlseg2e32ff_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_i32m2x2_mu(vm, vd, rs1, new_vl, vl); } -vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m4x2_t test_vlseg2e32ff_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_i32m4x2_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x2_t test_vlseg2e32ff_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e32ff_v_u32mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x2_t test_vlseg2e32ff_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_u32m1x2_mu(vm, vd, rs1, new_vl, vl); } -vuint32m2x2_t 
test_vlseg2e32ff_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m2x2_t test_vlseg2e32ff_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_u32m2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m4x2_t test_vlseg2e32ff_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e32ff_v_u32m4x2_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e64.c index 57bd14ce3..c07f97d98 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e64.c @@ -1,151 +1,187 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tu(vfloat64m1x2_t vd, const double *rs1, size_t vl) { +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tu(vfloat64m1x2_t vd, const double *rs1, + size_t vl) { return __riscv_vlseg2e64_v_f64m1x2_tu(vd, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tu(vfloat64m2x2_t vd, const double *rs1, size_t vl) { +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tu(vfloat64m2x2_t vd, const double *rs1, + size_t vl) { return __riscv_vlseg2e64_v_f64m2x2_tu(vd, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tu(vfloat64m4x2_t vd, const double *rs1, size_t vl) { +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tu(vfloat64m4x2_t vd, const double *rs1, + size_t vl) { return __riscv_vlseg2e64_v_f64m4x2_tu(vd, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, size_t vl) { +vint64m1x2_t test_vlseg2e64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, + size_t vl) { return __riscv_vlseg2e64_v_i64m1x2_tu(vd, rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, size_t vl) { +vint64m2x2_t test_vlseg2e64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, + size_t vl) { return __riscv_vlseg2e64_v_i64m2x2_tu(vd, rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, size_t vl) { +vint64m4x2_t test_vlseg2e64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, + size_t vl) { return __riscv_vlseg2e64_v_i64m4x2_tu(vd, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, + size_t vl) { return __riscv_vlseg2e64_v_u64m1x2_tu(vd, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, size_t vl) { +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, + size_t vl) { return __riscv_vlseg2e64_v_u64m2x2_tu(vd, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, size_t vl) { +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, + size_t vl) { return __riscv_vlseg2e64_v_u64m4x2_tu(vd, rs1, vl); } -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd,
const double *rs1, size_t vl) { +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg2e64_v_f64m1x2_tum(vm, vd, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, size_t vl) { +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg2e64_v_f64m2x2_tum(vm, vd, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, size_t vl) { +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg2e64_v_f64m4x2_tum(vm, vd, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t vl) { +vint64m1x2_t test_vlseg2e64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_i64m1x2_tum(vm, vd, rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t vl) { +vint64m2x2_t test_vlseg2e64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_i64m2x2_tum(vm, vd, rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t vl) { +vint64m4x2_t test_vlseg2e64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_i64m4x2_tum(vm, vd, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_u64m1x2_tum(vm, vd, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t vl) { +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_u64m2x2_tum(vm, vd, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t vl) { +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_u64m4x2_tum(vm, vd, rs1, vl); } -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, size_t vl) { +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg2e64_v_f64m1x2_tumu(vm, vd, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, size_t vl) { +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg2e64_v_f64m2x2_tumu(vm, vd, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, size_t vl) { +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg2e64_v_f64m4x2_tumu(vm, vd, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t vl) { +vint64m1x2_t test_vlseg2e64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_i64m1x2_tumu(vm, vd, 
rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t vl) { +vint64m2x2_t test_vlseg2e64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_i64m2x2_tumu(vm, vd, rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t vl) { +vint64m4x2_t test_vlseg2e64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_i64m4x2_tumu(vm, vd, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_u64m1x2_tumu(vm, vd, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t vl) { +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_u64m2x2_tumu(vm, vd, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t vl) { +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_u64m4x2_tumu(vm, vd, rs1, vl); } -vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, size_t vl) { +vfloat64m1x2_t test_vlseg2e64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg2e64_v_f64m1x2_mu(vm, vd, rs1, vl); } -vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, size_t vl) { +vfloat64m2x2_t test_vlseg2e64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg2e64_v_f64m2x2_mu(vm, vd, rs1, vl); } -vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, size_t vl) { +vfloat64m4x2_t test_vlseg2e64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg2e64_v_f64m4x2_mu(vm, vd, rs1, vl); } -vint64m1x2_t test_vlseg2e64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t vl) { +vint64m1x2_t test_vlseg2e64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_i64m1x2_mu(vm, vd, rs1, vl); } -vint64m2x2_t test_vlseg2e64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t vl) { +vint64m2x2_t test_vlseg2e64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_i64m2x2_mu(vm, vd, rs1, vl); } -vint64m4x2_t test_vlseg2e64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t vl) { +vint64m4x2_t test_vlseg2e64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_i64m4x2_mu(vm, vd, rs1, vl); } -vuint64m1x2_t test_vlseg2e64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x2_t test_vlseg2e64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_u64m1x2_mu(vm, vd, rs1, vl); } -vuint64m2x2_t test_vlseg2e64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t vl) { +vuint64m2x2_t test_vlseg2e64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, 
size_t vl) { return __riscv_vlseg2e64_v_u64m2x2_mu(vm, vd, rs1, vl); } -vuint64m4x2_t test_vlseg2e64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t vl) { +vuint64m4x2_t test_vlseg2e64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg2e64_v_u64m4x2_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e64ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e64ff.c index e4b95ef13..ec4b79c3b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e64ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e64ff.c @@ -1,151 +1,220 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tu(vfloat64m1x2_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tu(vfloat64m1x2_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_f64m1x2_tu(vd, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tu(vfloat64m2x2_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tu(vfloat64m2x2_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_f64m2x2_tu(vd, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tu(vfloat64m4x2_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tu(vfloat64m4x2_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_f64m4x2_tu(vd, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e64ff_v_i64m1x2_tu(vd, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e64ff_v_i64m2x2_tu(vd, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e64ff_v_i64m4x2_tu(vd, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tu(vuint64m1x2_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_u64m1x2_tu(vd, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tu(vuint64m2x2_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_u64m2x2_tu(vd, rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tu(vuint64m4x2_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t
vl) { return __riscv_vlseg2e64ff_v_u64m4x2_tu(vd, rs1, new_vl, vl); } -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_f64m1x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_f64m2x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_f64m4x2_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_i64m1x2_tum(vm, vd, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_i64m2x2_tum(vm, vd, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_i64m4x2_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e64ff_v_u64m1x2_tum(vm, vd, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e64ff_v_u64m2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e64ff_v_u64m4x2_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e64ff_v_f64m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, 
+ const double *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e64ff_v_f64m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e64ff_v_f64m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_i64m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_i64m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_i64m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e64ff_v_u64m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e64ff_v_u64m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e64ff_v_u64m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x2_t test_vlseg2e64ff_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_f64m1x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m2x2_t test_vlseg2e64ff_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_f64m2x2_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m4x2_t test_vlseg2e64ff_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_f64m4x2_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x2_t test_vlseg2e64ff_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x2_t 
test_vlseg2e64ff_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_i64m1x2_mu(vm, vd, rs1, new_vl, vl); } -vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m2x2_t test_vlseg2e64ff_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_i64m2x2_mu(vm, vd, rs1, new_vl, vl); } -vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m4x2_t test_vlseg2e64ff_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_i64m4x2_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x2_t test_vlseg2e64ff_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_u64m1x2_mu(vm, vd, rs1, new_vl, vl); } -vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m2x2_t test_vlseg2e64ff_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_u64m2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m4x2_t test_vlseg2e64ff_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e64ff_v_u64m4x2_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e8.c index af8c68c14..7a01b8376 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e8.c @@ -5,194 +5,242 @@ #include <riscv_vector.h> -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg2e8_v_i8mf8x2_tu(vd, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg2e8_v_i8mf4x2_tu(vd, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg2e8_v_i8mf2x2_tu(vd, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, size_t vl) { +vint8m1x2_t test_vlseg2e8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg2e8_v_i8m1x2_tu(vd, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, size_t vl) { +vint8m2x2_t test_vlseg2e8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg2e8_v_i8m2x2_tu(vd, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, size_t vl) { +vint8m4x2_t test_vlseg2e8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg2e8_v_i8m4x2_tu(vd, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t vd,
const uint8_t *rs1, size_t vl) { +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg2e8_v_u8mf8x2_tu(vd, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg2e8_v_u8mf4x2_tu(vd, rs1, vl); } -vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg2e8_v_u8mf2x2_tu(vd, rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg2e8_v_u8m1x2_tu(vd, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg2e8_v_u8m2x2_tu(vd, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg2e8_v_u8m4x2_tu(vd, rs1, vl); } -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8mf8x2_tum(vm, vd, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8mf4x2_tum(vm, vd, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8mf2x2_tum(vm, vd, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t vl) { +vint8m1x2_t test_vlseg2e8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8m1x2_tum(vm, vd, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t vl) { +vint8m2x2_t test_vlseg2e8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8m2x2_tum(vm, vd, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t vl) { +vint8m4x2_t test_vlseg2e8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8m4x2_tum(vm, vd, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8mf8x2_tum(vm, vd, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8mf4x2_tum(vm, vd, rs1, vl); } -vuint8mf2x2_t 
test_vlseg2e8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8mf2x2_tum(vm, vd, rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8m1x2_tum(vm, vd, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8m2x2_tum(vm, vd, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8m4x2_tum(vm, vd, rs1, vl); } -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8mf8x2_tumu(vm, vd, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8mf4x2_tumu(vm, vd, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8mf2x2_tumu(vm, vd, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t vl) { +vint8m1x2_t test_vlseg2e8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8m1x2_tumu(vm, vd, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t vl) { +vint8m2x2_t test_vlseg2e8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8m2x2_tumu(vm, vd, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t vl) { +vint8m4x2_t test_vlseg2e8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8m4x2_tumu(vm, vd, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8mf8x2_tumu(vm, vd, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8mf4x2_tumu(vm, vd, rs1, vl); } -vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8mf2x2_tumu(vm, vd, rs1, vl); } -vuint8m1x2_t 
test_vlseg2e8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8m1x2_tumu(vm, vd, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8m2x2_tumu(vm, vd, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8m4x2_tumu(vm, vd, rs1, vl); } -vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x2_t test_vlseg2e8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8mf8x2_mu(vm, vd, rs1, vl); } -vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x2_t test_vlseg2e8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8mf4x2_mu(vm, vd, rs1, vl); } -vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x2_t test_vlseg2e8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8mf2x2_mu(vm, vd, rs1, vl); } -vint8m1x2_t test_vlseg2e8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t vl) { +vint8m1x2_t test_vlseg2e8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8m1x2_mu(vm, vd, rs1, vl); } -vint8m2x2_t test_vlseg2e8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t vl) { +vint8m2x2_t test_vlseg2e8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8m2x2_mu(vm, vd, rs1, vl); } -vint8m4x2_t test_vlseg2e8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t vl) { +vint8m4x2_t test_vlseg2e8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_i8m4x2_mu(vm, vd, rs1, vl); } -vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x2_t test_vlseg2e8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8mf8x2_mu(vm, vd, rs1, vl); } -vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x2_t test_vlseg2e8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8mf4x2_mu(vm, vd, rs1, vl); } -vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x2_t test_vlseg2e8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8mf2x2_mu(vm, vd, rs1, vl); } -vuint8m1x2_t test_vlseg2e8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x2_t test_vlseg2e8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8m1x2_mu(vm, vd, rs1, vl); } -vuint8m2x2_t test_vlseg2e8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t vl) 
{ +vuint8m2x2_t test_vlseg2e8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8m2x2_mu(vm, vd, rs1, vl); } -vuint8m4x2_t test_vlseg2e8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t vl) { +vuint8m4x2_t test_vlseg2e8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg2e8_v_u8m4x2_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e8ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e8ff.c index 48a007333..e817f602d 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg2e8ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg2e8ff.c @@ -1,199 +1,283 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e8ff_v_i8mf8x2_tu(vd, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e8ff_v_i8mf4x2_tu(vd, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e8ff_v_i8mf2x2_tu(vd, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e8ff_v_i8m1x2_tu(vd, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e8ff_v_i8m2x2_tu(vd, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e8ff_v_i8m4x2_tu(vd, rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e8ff_v_u8mf8x2_tu(vd, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e8ff_v_u8mf4x2_tu(vd, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return
__riscv_vlseg2e8ff_v_u8mf2x2_tu(vd, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e8ff_v_u8m1x2_tu(vd, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e8ff_v_u8m2x2_tu(vd, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg2e8ff_v_u8m4x2_tu(vd, rs1, new_vl, vl); } -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8mf8x2_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8mf4x2_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8m1x2_tum(vm, vd, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8m2x2_tum(vm, vd, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8m4x2_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8mf8x2_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8mf4x2_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { 
+vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8mf2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8m1x2_tum(vm, vd, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8m2x2_tum(vm, vd, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8m4x2_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8mf8x2_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8mf4x2_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8mf8x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_tumu(vbool32_t 
vm, vuint8mf4x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8mf4x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8mf2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8m1x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8m2x2_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8m4x2_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x2_t test_vlseg2e8ff_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8mf8x2_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x2_t test_vlseg2e8ff_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8mf4x2_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x2_t test_vlseg2e8ff_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x2_t test_vlseg2e8ff_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8m1x2_mu(vm, vd, rs1, new_vl, vl); } -vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2x2_t test_vlseg2e8ff_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8m2x2_mu(vm, vd, rs1, new_vl, vl); } -vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m4x2_t test_vlseg2e8ff_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_i8m4x2_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x2_t test_vlseg2e8ff_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return 
__riscv_vlseg2e8ff_v_u8mf8x2_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x2_t test_vlseg2e8ff_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8mf4x2_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x2_t test_vlseg2e8ff_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8mf2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x2_t test_vlseg2e8ff_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8m1x2_mu(vm, vd, rs1, new_vl, vl); } -vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2x2_t test_vlseg2e8ff_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8m2x2_mu(vm, vd, rs1, new_vl, vl); } -vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m4x2_t test_vlseg2e8ff_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg2e8ff_v_u8m4x2_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e16.c index b30bf5e72..a7389a98c 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e16.c @@ -1,199 +1,247 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16mf4x3_tu(vd, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16mf2x3_tu(vd, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tu(vfloat16m1x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tu(vfloat16m1x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16m1x3_tu(vd, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tu(vfloat16m2x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tu(vfloat16m2x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16m2x3_tu(vd, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg3e16_v_i16mf4x3_tu(vd, rs1, vl); }
-vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg3e16_v_i16mf2x3_tu(vd, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, size_t vl) { +vint16m1x3_t test_vlseg3e16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg3e16_v_i16m1x3_tu(vd, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, size_t vl) { +vint16m2x3_t test_vlseg3e16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg3e16_v_i16m2x3_tu(vd, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16mf4x3_tu(vd, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16mf2x3_tu(vd, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vlseg3e16_v_u16m1x3_tu(vd, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vlseg3e16_v_u16m2x3_tu(vd, rs1, vl); } -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16mf4x3_tum(vm, vd, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16mf2x3_tum(vm, vd, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16m1x3_tum(vm, vd, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16m2x3_tum(vm, vd, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_i16mf4x3_tum(vm, vd, rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_i16mf2x3_tum(vm, vd, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t vl) { 
+vint16m1x3_t test_vlseg3e16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_i16m1x3_tum(vm, vd, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t vl) { +vint16m2x3_t test_vlseg3e16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_i16m2x3_tum(vm, vd, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16mf4x3_tum(vm, vd, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16mf2x3_tum(vm, vd, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16m1x3_tum(vm, vd, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16m2x3_tum(vm, vd, rs1, vl); } -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16mf4x3_tumu(vm, vd, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16mf2x3_tumu(vm, vd, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16m1x3_tumu(vm, vd, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16m2x3_tumu(vm, vd, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_i16mf4x3_tumu(vm, vd, rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_i16mf2x3_tumu(vm, vd, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t vl) { +vint16m1x3_t test_vlseg3e16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, size_t vl) { return 
__riscv_vlseg3e16_v_i16m1x3_tumu(vm, vd, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t vl) { +vint16m2x3_t test_vlseg3e16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_i16m2x3_tumu(vm, vd, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16mf4x3_tumu(vm, vd, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16mf2x3_tumu(vm, vd, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16m1x3_tumu(vm, vd, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16m2x3_tumu(vm, vd, rs1, vl); } -vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x3_t test_vlseg3e16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16mf4x3_mu(vm, vd, rs1, vl); } -vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x3_t test_vlseg3e16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16mf2x3_mu(vm, vd, rs1, vl); } -vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x3_t test_vlseg3e16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16m1x3_mu(vm, vd, rs1, vl); } -vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2x3_t test_vlseg3e16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg3e16_v_f16m2x3_mu(vm, vd, rs1, vl); } -vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x3_t test_vlseg3e16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_i16mf4x3_mu(vm, vd, rs1, vl); } -vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x3_t test_vlseg3e16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_i16mf2x3_mu(vm, vd, rs1, vl); } -vint16m1x3_t test_vlseg3e16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t vl) { +vint16m1x3_t test_vlseg3e16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_i16m1x3_mu(vm, vd, rs1, vl); } -vint16m2x3_t test_vlseg3e16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t vl) { 
+vint16m2x3_t test_vlseg3e16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_i16m2x3_mu(vm, vd, rs1, vl); } -vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x3_t test_vlseg3e16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16mf4x3_mu(vm, vd, rs1, vl); } -vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x3_t test_vlseg3e16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16mf2x3_mu(vm, vd, rs1, vl); } -vuint16m1x3_t test_vlseg3e16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x3_t test_vlseg3e16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16m1x3_mu(vm, vd, rs1, vl); } -vuint16m2x3_t test_vlseg3e16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2x3_t test_vlseg3e16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg3e16_v_u16m2x3_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e16ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e16ff.c index 574ecfc19..b785d2e44 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e16ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e16ff.c @@ -1,199 +1,297 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tu(vfloat16mf4x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16mf4x3_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tu(vfloat16mf2x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16mf2x3_tu(vd, rs1, new_vl, vl); } -vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tu(vfloat16m1x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tu(vfloat16m1x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16m1x3_tu(vd, rs1, new_vl, vl); } -vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tu(vfloat16m2x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tu(vfloat16m2x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16m2x3_tu(vd, rs1, new_vl, vl); } -vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tu(vint16mf4x3_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_i16mf4x3_tu(vd, rs1, new_vl, vl); } -vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl)
{ +vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tu(vint16mf2x3_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_i16mf2x3_tu(vd, rs1, new_vl, vl); } -vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_i16m1x3_tu(vd, rs1, new_vl, vl); } -vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_i16m2x3_tu(vd, rs1, new_vl, vl); } -vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tu(vuint16mf4x3_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_u16mf4x3_tu(vd, rs1, new_vl, vl); } -vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tu(vuint16mf2x3_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_u16mf2x3_tu(vd, rs1, new_vl, vl); } -vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tu(vuint16m1x3_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_u16m1x3_tu(vd, rs1, new_vl, vl); } -vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tu(vuint16m2x3_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_u16m2x3_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tum(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16mf4x3_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tum(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16mf2x3_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16m1x3_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16m2x3_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { 
return __riscv_vlseg3e16ff_v_i16mf4x3_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_i16mf2x3_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_i16m1x3_tum(vm, vd, rs1, new_vl, vl); } -vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_i16m2x3_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_u16mf4x3_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_u16mf2x3_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_u16m1x3_tum(vm, vd, rs1, new_vl, vl); } -vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_u16m2x3_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_tumu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16mf4x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_tumu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16mf2x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m2x3_t 
test_vlseg3e16ff_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_i16mf4x3_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_i16mf2x3_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_i16m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_i16m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_u16mf4x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_u16mf2x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_u16m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_u16m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x3_t test_vlseg3e16ff_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16mf4x3_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x3_t test_vlseg3e16ff_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x3_t 
test_vlseg3e16ff_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x3_t test_vlseg3e16ff_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16m1x3_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m2x3_t test_vlseg3e16ff_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_f16m2x3_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x3_t test_vlseg3e16ff_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_i16mf4x3_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x3_t test_vlseg3e16ff_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_i16mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x3_t test_vlseg3e16ff_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_i16m1x3_mu(vm, vd, rs1, new_vl, vl); } -vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2x3_t test_vlseg3e16ff_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_i16m2x3_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x3_t test_vlseg3e16ff_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_u16mf4x3_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x3_t test_vlseg3e16ff_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e16ff_v_u16mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x3_t test_vlseg3e16ff_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_u16m1x3_mu(vm, vd, rs1, new_vl, vl); } -vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2x3_t test_vlseg3e16ff_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e16ff_v_u16m2x3_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e32.c index 4ec355892..f2e30c4b8 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e32.c @@ -1,151 +1,187 @@ // 
REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float *rs1, size_t vl) { +vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg3e32_v_f32mf2x3_tu(vd, rs1, vl); } -vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, size_t vl) { +vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, + size_t vl) { return __riscv_vlseg3e32_v_f32m1x3_tu(vd, rs1, vl); } -vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, size_t vl) { +vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, + size_t vl) { return __riscv_vlseg3e32_v_f32m2x3_tu(vd, rs1, vl); } -vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg3e32_v_i32mf2x3_tu(vd, rs1, vl); } -vint32m1x3_t test_vlseg3e32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, size_t vl) { +vint32m1x3_t test_vlseg3e32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg3e32_v_i32m1x3_tu(vd, rs1, vl); } -vint32m2x3_t test_vlseg3e32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, size_t vl) { +vint32m2x3_t test_vlseg3e32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg3e32_v_i32m2x3_tu(vd, rs1, vl); } -vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_u32mf2x3_tu(vd, rs1, vl); } -vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, + size_t vl) { return __riscv_vlseg3e32_v_u32m1x3_tu(vd, rs1, vl); } -vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, size_t vl) { +vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, + size_t vl) { return __riscv_vlseg3e32_v_u32m2x3_tu(vd, rs1, vl); } -vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, size_t vl) { +vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg3e32_v_f32mf2x3_tum(vm, vd, rs1, vl); } -vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, size_t vl) { +vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg3e32_v_f32m1x3_tum(vm, vd, rs1, vl); } -vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, size_t vl) { +vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg3e32_v_f32m2x3_tum(vm, vd, rs1, vl); } -vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x3_t
test_vlseg3e32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_i32mf2x3_tum(vm, vd, rs1, vl); } -vint32m1x3_t test_vlseg3e32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t vl) { +vint32m1x3_t test_vlseg3e32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_i32m1x3_tum(vm, vd, rs1, vl); } -vint32m2x3_t test_vlseg3e32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t vl) { +vint32m2x3_t test_vlseg3e32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_i32m2x3_tum(vm, vd, rs1, vl); } -vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_u32mf2x3_tum(vm, vd, rs1, vl); } -vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_u32m1x3_tum(vm, vd, rs1, vl); } -vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t vl) { +vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_u32m2x3_tum(vm, vd, rs1, vl); } -vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, size_t vl) { +vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg3e32_v_f32mf2x3_tumu(vm, vd, rs1, vl); } -vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, size_t vl) { +vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg3e32_v_f32m1x3_tumu(vm, vd, rs1, vl); } -vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, size_t vl) { +vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg3e32_v_f32m2x3_tumu(vm, vd, rs1, vl); } -vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_i32mf2x3_tumu(vm, vd, rs1, vl); } -vint32m1x3_t test_vlseg3e32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t vl) { +vint32m1x3_t test_vlseg3e32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_i32m1x3_tumu(vm, vd, rs1, vl); } -vint32m2x3_t test_vlseg3e32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t vl) { +vint32m2x3_t test_vlseg3e32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_i32m2x3_tumu(vm, vd, rs1, vl); } -vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_u32mf2x3_tumu(vm, vd, rs1, vl); } 
-vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x3_t test_vlseg3e32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_u32m1x3_tumu(vm, vd, rs1, vl); } -vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t vl) { +vuint32m2x3_t test_vlseg3e32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_u32m2x3_tumu(vm, vd, rs1, vl); } -vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, size_t vl) { +vfloat32mf2x3_t test_vlseg3e32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg3e32_v_f32mf2x3_mu(vm, vd, rs1, vl); } -vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, size_t vl) { +vfloat32m1x3_t test_vlseg3e32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg3e32_v_f32m1x3_mu(vm, vd, rs1, vl); } -vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, size_t vl) { +vfloat32m2x3_t test_vlseg3e32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg3e32_v_f32m2x3_mu(vm, vd, rs1, vl); } -vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x3_t test_vlseg3e32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_i32mf2x3_mu(vm, vd, rs1, vl); } -vint32m1x3_t test_vlseg3e32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t vl) { +vint32m1x3_t test_vlseg3e32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_i32m1x3_mu(vm, vd, rs1, vl); } -vint32m2x3_t test_vlseg3e32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t vl) { +vint32m2x3_t test_vlseg3e32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_i32m2x3_mu(vm, vd, rs1, vl); } -vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x3_t test_vlseg3e32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_u32mf2x3_mu(vm, vd, rs1, vl); } -vuint32m1x3_t test_vlseg3e32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x3_t test_vlseg3e32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_u32m1x3_mu(vm, vd, rs1, vl); } -vuint32m2x3_t test_vlseg3e32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t vl) { +vuint32m2x3_t test_vlseg3e32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg3e32_v_u32m2x3_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e32ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e32ff.c index 05cbcb5ee..7abe7bec7 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e32ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e32ff.c @@ -1,151 +1,223 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone 
\ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tu(vfloat32mf2x3_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_f32mf2x3_tu(vd, rs1, new_vl, vl); } -vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tu(vfloat32m1x3_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_f32m1x3_tu(vd, rs1, new_vl, vl); } -vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tu(vfloat32m2x3_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_f32m2x3_tu(vd, rs1, new_vl, vl); } -vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tu(vint32mf2x3_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_i32mf2x3_tu(vd, rs1, new_vl, vl); } -vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_i32m1x3_tu(vd, rs1, new_vl, vl); } -vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_i32m2x3_tu(vd, rs1, new_vl, vl); } -vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tu(vuint32mf2x3_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_u32mf2x3_tu(vd, rs1, new_vl, vl); } -vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tu(vuint32m1x3_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_u32m1x3_tu(vd, rs1, new_vl, vl); } -vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tu(vuint32m2x3_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_u32m2x3_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tum(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_f32mf2x3_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_f32m1x3_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const
float *rs1, size_t *new_vl, size_t vl) { +vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_f32m2x3_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_i32mf2x3_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_i32m1x3_tum(vm, vd, rs1, new_vl, vl); } -vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_i32m2x3_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_u32mf2x3_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_u32m1x3_tum(vm, vd, rs1, new_vl, vl); } -vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_u32m2x3_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_tumu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_f32mf2x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_f32m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_f32m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_i32mf2x3_tumu(vm, vd, rs1, new_vl, vl); } 
-vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_i32m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_i32m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_u32mf2x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_u32m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_u32m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x3_t test_vlseg3e32ff_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_f32mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x3_t test_vlseg3e32ff_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_f32m1x3_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m2x3_t test_vlseg3e32ff_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_f32m2x3_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x3_t test_vlseg3e32ff_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_i32mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x3_t test_vlseg3e32ff_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_i32m1x3_mu(vm, vd, rs1, new_vl, vl); } -vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m2x3_t test_vlseg3e32ff_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return 
__riscv_vlseg3e32ff_v_i32m2x3_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x3_t test_vlseg3e32ff_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e32ff_v_u32mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x3_t test_vlseg3e32ff_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_u32m1x3_mu(vm, vd, rs1, new_vl, vl); } -vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m2x3_t test_vlseg3e32ff_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e32ff_v_u32m2x3_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e64.c index 84192a9e6..a819342b5 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e64.c @@ -1,103 +1,127 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tu(vfloat64m1x3_t vd, const double *rs1, size_t vl) { +vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tu(vfloat64m1x3_t vd, const double *rs1, + size_t vl) { return __riscv_vlseg3e64_v_f64m1x3_tu(vd, rs1, vl); } -vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tu(vfloat64m2x3_t vd, const double *rs1, size_t vl) { +vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tu(vfloat64m2x3_t vd, const double *rs1, + size_t vl) { return __riscv_vlseg3e64_v_f64m2x3_tu(vd, rs1, vl); } -vint64m1x3_t test_vlseg3e64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, size_t vl) { +vint64m1x3_t test_vlseg3e64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, + size_t vl) { return __riscv_vlseg3e64_v_i64m1x3_tu(vd, rs1, vl); } -vint64m2x3_t test_vlseg3e64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, size_t vl) { +vint64m2x3_t test_vlseg3e64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, + size_t vl) { return __riscv_vlseg3e64_v_i64m2x3_tu(vd, rs1, vl); } -vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, + size_t vl) { return __riscv_vlseg3e64_v_u64m1x3_tu(vd, rs1, vl); } -vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, size_t vl) { +vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, + size_t vl) { return __riscv_vlseg3e64_v_u64m2x3_tu(vd, rs1, vl); } -vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, size_t vl) { +vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg3e64_v_f64m1x3_tum(vm, vd, rs1, vl); } -vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, size_t vl) {
+vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg3e64_v_f64m2x3_tum(vm, vd, rs1, vl); } -vint64m1x3_t test_vlseg3e64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t vl) { +vint64m1x3_t test_vlseg3e64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg3e64_v_i64m1x3_tum(vm, vd, rs1, vl); } -vint64m2x3_t test_vlseg3e64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t vl) { +vint64m2x3_t test_vlseg3e64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg3e64_v_i64m2x3_tum(vm, vd, rs1, vl); } -vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg3e64_v_u64m1x3_tum(vm, vd, rs1, vl); } -vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t vl) { +vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg3e64_v_u64m2x3_tum(vm, vd, rs1, vl); } -vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, size_t vl) { +vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg3e64_v_f64m1x3_tumu(vm, vd, rs1, vl); } -vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, size_t vl) { +vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg3e64_v_f64m2x3_tumu(vm, vd, rs1, vl); } -vint64m1x3_t test_vlseg3e64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t vl) { +vint64m1x3_t test_vlseg3e64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg3e64_v_i64m1x3_tumu(vm, vd, rs1, vl); } -vint64m2x3_t test_vlseg3e64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t vl) { +vint64m2x3_t test_vlseg3e64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg3e64_v_i64m2x3_tumu(vm, vd, rs1, vl); } -vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x3_t test_vlseg3e64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg3e64_v_u64m1x3_tumu(vm, vd, rs1, vl); } -vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t vl) { +vuint64m2x3_t test_vlseg3e64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg3e64_v_u64m2x3_tumu(vm, vd, rs1, vl); } -vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, size_t vl) { +vfloat64m1x3_t test_vlseg3e64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg3e64_v_f64m1x3_mu(vm, vd, rs1, vl); } -vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, size_t vl) { +vfloat64m2x3_t test_vlseg3e64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg3e64_v_f64m2x3_mu(vm, vd, rs1, vl); } -vint64m1x3_t 
test_vlseg3e64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t vl) { +vint64m1x3_t test_vlseg3e64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg3e64_v_i64m1x3_mu(vm, vd, rs1, vl); } -vint64m2x3_t test_vlseg3e64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t vl) { +vint64m2x3_t test_vlseg3e64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg3e64_v_i64m2x3_mu(vm, vd, rs1, vl); } -vuint64m1x3_t test_vlseg3e64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x3_t test_vlseg3e64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg3e64_v_u64m1x3_mu(vm, vd, rs1, vl); } -vuint64m2x3_t test_vlseg3e64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t vl) { +vuint64m2x3_t test_vlseg3e64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg3e64_v_u64m2x3_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e64ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e64ff.c index 21a681a53..520a58409 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e64ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e64ff.c @@ -1,103 +1,149 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tu(vfloat64m1x3_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tu(vfloat64m1x3_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_f64m1x3_tu(vd, rs1, new_vl, vl); } -vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tu(vfloat64m2x3_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tu(vfloat64m2x3_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_f64m2x3_tu(vd, rs1, new_vl, vl); } -vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e64ff_v_i64m1x3_tu(vd, rs1, new_vl, vl); } -vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e64ff_v_i64m2x3_tu(vd, rs1, new_vl, vl); } -vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tu(vuint64m1x3_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_u64m1x3_tu(vd, rs1, new_vl, vl); } -vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tu(vuint64m2x3_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_u64m2x3_tu(vd, rs1, new_vl, vl); } -vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tum(vbool64_t vm,
vfloat64m1x3_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_f64m1x3_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_f64m2x3_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_i64m1x3_tum(vm, vd, rs1, new_vl, vl); } -vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_i64m2x3_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e64ff_v_u64m1x3_tum(vm, vd, rs1, new_vl, vl); } -vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e64ff_v_u64m2x3_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e64ff_v_f64m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e64ff_v_f64m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_i64m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_i64m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e64ff_v_u64m1x3_tumu(vm, vd, rs1, new_vl, vl); } 
-vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e64ff_v_u64m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x3_t test_vlseg3e64ff_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_f64m1x3_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m2x3_t test_vlseg3e64ff_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_f64m2x3_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x3_t test_vlseg3e64ff_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_i64m1x3_mu(vm, vd, rs1, new_vl, vl); } -vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m2x3_t test_vlseg3e64ff_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_i64m2x3_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x3_t test_vlseg3e64ff_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_u64m1x3_mu(vm, vd, rs1, new_vl, vl); } -vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m2x3_t test_vlseg3e64ff_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e64ff_v_u64m2x3_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e8.c index bc38108c1..cd1a55c98 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e8.c @@ -5,162 +5,202 @@ #include <riscv_vector.h> -vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg3e8_v_i8mf8x3_tu(vd, rs1, vl); } -vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg3e8_v_i8mf4x3_tu(vd, rs1, vl); } -vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg3e8_v_i8mf2x3_tu(vd, rs1, vl); } -vint8m1x3_t test_vlseg3e8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, size_t vl) { +vint8m1x3_t test_vlseg3e8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg3e8_v_i8m1x3_tu(vd, rs1, vl); } -vint8m2x3_t
test_vlseg3e8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, size_t vl) { +vint8m2x3_t test_vlseg3e8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg3e8_v_i8m2x3_tu(vd, rs1, vl); } -vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg3e8_v_u8mf8x3_tu(vd, rs1, vl); } -vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg3e8_v_u8mf4x3_tu(vd, rs1, vl); } -vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg3e8_v_u8mf2x3_tu(vd, rs1, vl); } -vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg3e8_v_u8m1x3_tu(vd, rs1, vl); } -vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg3e8_v_u8m2x3_tu(vd, rs1, vl); } -vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8mf8x3_tum(vm, vd, rs1, vl); } -vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8mf4x3_tum(vm, vd, rs1, vl); } -vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8mf2x3_tum(vm, vd, rs1, vl); } -vint8m1x3_t test_vlseg3e8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t vl) { +vint8m1x3_t test_vlseg3e8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8m1x3_tum(vm, vd, rs1, vl); } -vint8m2x3_t test_vlseg3e8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t vl) { +vint8m2x3_t test_vlseg3e8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8m2x3_tum(vm, vd, rs1, vl); } -vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8mf8x3_tum(vm, vd, rs1, vl); } -vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8mf4x3_tum(vm, vd, rs1, vl); } -vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8mf2x3_tum(vm, vd, 
rs1, vl); } -vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8m1x3_tum(vm, vd, rs1, vl); } -vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8m2x3_tum(vm, vd, rs1, vl); } -vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8mf8x3_tumu(vm, vd, rs1, vl); } -vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8mf4x3_tumu(vm, vd, rs1, vl); } -vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8mf2x3_tumu(vm, vd, rs1, vl); } -vint8m1x3_t test_vlseg3e8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t vl) { +vint8m1x3_t test_vlseg3e8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8m1x3_tumu(vm, vd, rs1, vl); } -vint8m2x3_t test_vlseg3e8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t vl) { +vint8m2x3_t test_vlseg3e8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8m2x3_tumu(vm, vd, rs1, vl); } -vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8mf8x3_tumu(vm, vd, rs1, vl); } -vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8mf4x3_tumu(vm, vd, rs1, vl); } -vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8mf2x3_tumu(vm, vd, rs1, vl); } -vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x3_t test_vlseg3e8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8m1x3_tumu(vm, vd, rs1, vl); } -vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2x3_t test_vlseg3e8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8m2x3_tumu(vm, vd, rs1, vl); } -vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x3_t test_vlseg3e8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8mf8x3_mu(vm, vd, rs1, vl); } -vint8mf4x3_t 
test_vlseg3e8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x3_t test_vlseg3e8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8mf4x3_mu(vm, vd, rs1, vl); } -vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x3_t test_vlseg3e8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8mf2x3_mu(vm, vd, rs1, vl); } -vint8m1x3_t test_vlseg3e8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t vl) { +vint8m1x3_t test_vlseg3e8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8m1x3_mu(vm, vd, rs1, vl); } -vint8m2x3_t test_vlseg3e8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t vl) { +vint8m2x3_t test_vlseg3e8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_i8m2x3_mu(vm, vd, rs1, vl); } -vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x3_t test_vlseg3e8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8mf8x3_mu(vm, vd, rs1, vl); } -vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x3_t test_vlseg3e8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8mf4x3_mu(vm, vd, rs1, vl); } -vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x3_t test_vlseg3e8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8mf2x3_mu(vm, vd, rs1, vl); } -vuint8m1x3_t test_vlseg3e8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x3_t test_vlseg3e8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8m1x3_mu(vm, vd, rs1, vl); } -vuint8m2x3_t test_vlseg3e8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2x3_t test_vlseg3e8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg3e8_v_u8m2x3_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e8ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e8ff.c index fa952de63..1958782e8 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg3e8ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg3e8ff.c @@ -1,167 +1,237 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e8ff_v_i8mf8x3_tu(vd, rs1, new_vl, vl); } -vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return
__riscv_vlseg3e8ff_v_i8mf4x3_tu(vd, rs1, new_vl, vl); } -vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e8ff_v_i8mf2x3_tu(vd, rs1, new_vl, vl); } -vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e8ff_v_i8m1x3_tu(vd, rs1, new_vl, vl); } -vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e8ff_v_i8m2x3_tu(vd, rs1, new_vl, vl); } -vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e8ff_v_u8mf8x3_tu(vd, rs1, new_vl, vl); } -vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e8ff_v_u8mf4x3_tu(vd, rs1, new_vl, vl); } -vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e8ff_v_u8mf2x3_tu(vd, rs1, new_vl, vl); } -vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e8ff_v_u8m1x3_tu(vd, rs1, new_vl, vl); } -vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg3e8ff_v_u8m2x3_tu(vd, rs1, new_vl, vl); } -vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8mf8x3_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8mf4x3_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8mf2x3_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8m1x3_tum(vm, vd, rs1, 
new_vl, vl); } -vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8m2x3_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8mf8x3_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8mf4x3_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8mf2x3_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8m1x3_tum(vm, vd, rs1, new_vl, vl); } -vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8m2x3_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8mf8x3_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8mf4x3_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8mf2x3_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x3_t 
test_vlseg3e8ff_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8mf8x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8mf4x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8mf2x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8m1x3_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8m2x3_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x3_t test_vlseg3e8ff_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8mf8x3_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x3_t test_vlseg3e8ff_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8mf4x3_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x3_t test_vlseg3e8ff_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x3_t test_vlseg3e8ff_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8m1x3_mu(vm, vd, rs1, new_vl, vl); } -vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2x3_t test_vlseg3e8ff_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_i8m2x3_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x3_t test_vlseg3e8ff_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8mf8x3_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t 
vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x3_t test_vlseg3e8ff_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8mf4x3_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x3_t test_vlseg3e8ff_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8mf2x3_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x3_t test_vlseg3e8ff_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8m1x3_mu(vm, vd, rs1, new_vl, vl); } -vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2x3_t test_vlseg3e8ff_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg3e8ff_v_u8m2x3_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e16.c index 57c616186..41091d791 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e16.c @@ -1,199 +1,247 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16mf4x4_tu(vd, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16mf2x4_tu(vd, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tu(vfloat16m1x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tu(vfloat16m1x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16m1x4_tu(vd, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tu(vfloat16m2x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tu(vfloat16m2x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16m2x4_tu(vd, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg4e16_v_i16mf4x4_tu(vd, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg4e16_v_i16mf2x4_tu(vd, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, size_t vl) { +vint16m1x4_t test_vlseg4e16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, + size_t vl) { return
__riscv_vlseg4e16_v_i16m1x4_tu(vd, rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, size_t vl) { +vint16m2x4_t test_vlseg4e16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg4e16_v_i16m2x4_tu(vd, rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16mf4x4_tu(vd, rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16mf2x4_tu(vd, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vlseg4e16_v_u16m1x4_tu(vd, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vlseg4e16_v_u16m2x4_tu(vd, rs1, vl); } -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16mf4x4_tum(vm, vd, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16mf2x4_tum(vm, vd, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16m1x4_tum(vm, vd, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16m2x4_tum(vm, vd, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_i16mf4x4_tum(vm, vd, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_i16mf2x4_tum(vm, vd, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t vl) { +vint16m1x4_t test_vlseg4e16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_i16m1x4_tum(vm, vd, rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t vl) { +vint16m2x4_t test_vlseg4e16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_i16m2x4_tum(vm, vd, rs1, vl); } 
-vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16mf4x4_tum(vm, vd, rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16mf2x4_tum(vm, vd, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16m1x4_tum(vm, vd, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16m2x4_tum(vm, vd, rs1, vl); } -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16mf4x4_tumu(vm, vd, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16mf2x4_tumu(vm, vd, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16m1x4_tumu(vm, vd, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16m2x4_tumu(vm, vd, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_i16mf4x4_tumu(vm, vd, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_i16mf2x4_tumu(vm, vd, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t vl) { +vint16m1x4_t test_vlseg4e16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_i16m1x4_tumu(vm, vd, rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t vl) { +vint16m2x4_t test_vlseg4e16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_i16m2x4_tumu(vm, vd, rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x4_t 
test_vlseg4e16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16mf4x4_tumu(vm, vd, rs1, vl); } -vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16mf2x4_tumu(vm, vd, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16m1x4_tumu(vm, vd, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16m2x4_tumu(vm, vd, rs1, vl); } -vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x4_t test_vlseg4e16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16mf4x4_mu(vm, vd, rs1, vl); } -vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x4_t test_vlseg4e16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16mf2x4_mu(vm, vd, rs1, vl); } -vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x4_t test_vlseg4e16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16m1x4_mu(vm, vd, rs1, vl); } -vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m2x4_t test_vlseg4e16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg4e16_v_f16m2x4_mu(vm, vd, rs1, vl); } -vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x4_t test_vlseg4e16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_i16mf4x4_mu(vm, vd, rs1, vl); } -vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x4_t test_vlseg4e16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_i16mf2x4_mu(vm, vd, rs1, vl); } -vint16m1x4_t test_vlseg4e16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t vl) { +vint16m1x4_t test_vlseg4e16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_i16m1x4_mu(vm, vd, rs1, vl); } -vint16m2x4_t test_vlseg4e16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t vl) { +vint16m2x4_t test_vlseg4e16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_i16m2x4_mu(vm, vd, rs1, vl); } -vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x4_t test_vlseg4e16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16mf4x4_mu(vm, vd, rs1, vl); } 
-vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x4_t test_vlseg4e16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16mf2x4_mu(vm, vd, rs1, vl); } -vuint16m1x4_t test_vlseg4e16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x4_t test_vlseg4e16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16m1x4_mu(vm, vd, rs1, vl); } -vuint16m2x4_t test_vlseg4e16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t vl) { +vuint16m2x4_t test_vlseg4e16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg4e16_v_u16m2x4_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e16ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e16ff.c index b144b32ad..6fa59034c 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e16ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e16ff.c @@ -1,199 +1,297 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tu(vfloat16mf4x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16mf4x4_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tu(vfloat16mf2x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16mf2x4_tu(vd, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tu(vfloat16m1x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tu(vfloat16m1x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16m1x4_tu(vd, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tu(vfloat16m2x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tu(vfloat16m2x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16m2x4_tu(vd, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tu(vint16mf4x4_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_i16mf4x4_tu(vd, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tu(vint16mf2x4_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_i16mf2x4_tu(vd, rs1, new_vl, vl); } -vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_i16m1x4_tu(vd,
rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_i16m2x4_tu(vd, rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tu(vuint16mf4x4_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_u16mf4x4_tu(vd, rs1, new_vl, vl); } -vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tu(vuint16mf2x4_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_u16mf2x4_tu(vd, rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tu(vuint16m1x4_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_u16m1x4_tu(vd, rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tu(vuint16m2x4_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_u16m2x4_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tum(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16mf4x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tum(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16m1x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16m2x4_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_i16mf4x4_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_i16mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x4_t 
test_vlseg4e16ff_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_i16m1x4_tum(vm, vd, rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_i16m2x4_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_u16mf4x4_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_u16mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_u16m1x4_tum(vm, vd, rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_u16m2x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_tumu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16mf4x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_tumu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + 
size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_i16mf4x4_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_i16mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_i16m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_i16m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_u16mf4x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_u16mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_u16m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_u16m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x4_t test_vlseg4e16ff_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16mf4x4_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x4_t test_vlseg4e16ff_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x4_t test_vlseg4e16ff_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16m1x4_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { 
+vfloat16m2x4_t test_vlseg4e16ff_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_f16m2x4_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x4_t test_vlseg4e16ff_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_i16mf4x4_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x4_t test_vlseg4e16ff_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_i16mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x4_t test_vlseg4e16ff_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_i16m1x4_mu(vm, vd, rs1, new_vl, vl); } -vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m2x4_t test_vlseg4e16ff_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_i16m2x4_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x4_t test_vlseg4e16ff_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_u16mf4x4_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x4_t test_vlseg4e16ff_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e16ff_v_u16mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x4_t test_vlseg4e16ff_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_u16m1x4_mu(vm, vd, rs1, new_vl, vl); } -vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m2x4_t test_vlseg4e16ff_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e16ff_v_u16m2x4_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e32.c index b2de84ad5..9a3f61bbd 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e32.c @@ -1,151 +1,187 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float *rs1, size_t vl) {
+vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg4e32_v_f32mf2x4_tu(vd, rs1, vl); } -vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1, size_t vl) { +vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1, + size_t vl) { return __riscv_vlseg4e32_v_f32m1x4_tu(vd, rs1, vl); } -vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1, size_t vl) { +vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1, + size_t vl) { return __riscv_vlseg4e32_v_f32m2x4_tu(vd, rs1, vl); } -vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg4e32_v_i32mf2x4_tu(vd, rs1, vl); } -vint32m1x4_t test_vlseg4e32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, size_t vl) { +vint32m1x4_t test_vlseg4e32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg4e32_v_i32m1x4_tu(vd, rs1, vl); } -vint32m2x4_t test_vlseg4e32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, size_t vl) { +vint32m2x4_t test_vlseg4e32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg4e32_v_i32m2x4_tu(vd, rs1, vl); } -vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_u32mf2x4_tu(vd, rs1, vl); } -vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, + size_t vl) { return __riscv_vlseg4e32_v_u32m1x4_tu(vd, rs1, vl); } -vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, size_t vl) { +vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, + size_t vl) { return __riscv_vlseg4e32_v_u32m2x4_tu(vd, rs1, vl); } -vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, size_t vl) { +vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg4e32_v_f32mf2x4_tum(vm, vd, rs1, vl); } -vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, size_t vl) { +vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg4e32_v_f32m1x4_tum(vm, vd, rs1, vl); } -vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, size_t vl) { +vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg4e32_v_f32m2x4_tum(vm, vd, rs1, vl); } -vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_i32mf2x4_tum(vm, vd, rs1, vl); } -vint32m1x4_t test_vlseg4e32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t vl) { +vint32m1x4_t test_vlseg4e32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_i32m1x4_tum(vm, vd, rs1, vl); } -vint32m2x4_t 
test_vlseg4e32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t vl) { +vint32m2x4_t test_vlseg4e32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_i32m2x4_tum(vm, vd, rs1, vl); } -vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_u32mf2x4_tum(vm, vd, rs1, vl); } -vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_u32m1x4_tum(vm, vd, rs1, vl); } -vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t vl) { +vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_u32m2x4_tum(vm, vd, rs1, vl); } -vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, size_t vl) { +vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg4e32_v_f32mf2x4_tumu(vm, vd, rs1, vl); } -vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, size_t vl) { +vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg4e32_v_f32m1x4_tumu(vm, vd, rs1, vl); } -vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, size_t vl) { +vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg4e32_v_f32m2x4_tumu(vm, vd, rs1, vl); } -vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_i32mf2x4_tumu(vm, vd, rs1, vl); } -vint32m1x4_t test_vlseg4e32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t vl) { +vint32m1x4_t test_vlseg4e32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_i32m1x4_tumu(vm, vd, rs1, vl); } -vint32m2x4_t test_vlseg4e32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t vl) { +vint32m2x4_t test_vlseg4e32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_i32m2x4_tumu(vm, vd, rs1, vl); } -vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_u32mf2x4_tumu(vm, vd, rs1, vl); } -vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x4_t test_vlseg4e32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_u32m1x4_tumu(vm, vd, rs1, vl); } -vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t vl) { +vuint32m2x4_t test_vlseg4e32_v_u32m2x4_tumu(vbool16_t vm, 
vuint32m2x4_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_u32m2x4_tumu(vm, vd, rs1, vl); } -vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, size_t vl) { +vfloat32mf2x4_t test_vlseg4e32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg4e32_v_f32mf2x4_mu(vm, vd, rs1, vl); } -vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, size_t vl) { +vfloat32m1x4_t test_vlseg4e32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg4e32_v_f32m1x4_mu(vm, vd, rs1, vl); } -vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, size_t vl) { +vfloat32m2x4_t test_vlseg4e32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg4e32_v_f32m2x4_mu(vm, vd, rs1, vl); } -vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x4_t test_vlseg4e32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_i32mf2x4_mu(vm, vd, rs1, vl); } -vint32m1x4_t test_vlseg4e32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t vl) { +vint32m1x4_t test_vlseg4e32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_i32m1x4_mu(vm, vd, rs1, vl); } -vint32m2x4_t test_vlseg4e32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t vl) { +vint32m2x4_t test_vlseg4e32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_i32m2x4_mu(vm, vd, rs1, vl); } -vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x4_t test_vlseg4e32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_u32mf2x4_mu(vm, vd, rs1, vl); } -vuint32m1x4_t test_vlseg4e32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x4_t test_vlseg4e32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_u32m1x4_mu(vm, vd, rs1, vl); } -vuint32m2x4_t test_vlseg4e32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t vl) { +vuint32m2x4_t test_vlseg4e32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg4e32_v_u32m2x4_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e32ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e32ff.c index 49326337a..1b526432a 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e32ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e32ff.c @@ -1,151 +1,223 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tu(vfloat32mf2x4_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_f32mf2x4_tu(vd, rs1,
new_vl, vl); } -vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tu(vfloat32m1x4_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_f32m1x4_tu(vd, rs1, new_vl, vl); } -vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tu(vfloat32m2x4_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_f32m2x4_tu(vd, rs1, new_vl, vl); } -vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tu(vint32mf2x4_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_i32mf2x4_tu(vd, rs1, new_vl, vl); } -vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_i32m1x4_tu(vd, rs1, new_vl, vl); } -vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_i32m2x4_tu(vd, rs1, new_vl, vl); } -vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tu(vuint32mf2x4_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_u32mf2x4_tu(vd, rs1, new_vl, vl); } -vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tu(vuint32m1x4_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_u32m1x4_tu(vd, rs1, new_vl, vl); } -vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tu(vuint32m2x4_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_u32m2x4_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tum(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_f32mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_f32m1x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_f32m2x4_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, + 
const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_i32mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_i32m1x4_tum(vm, vd, rs1, new_vl, vl); } -vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_i32m2x4_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_u32mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_u32m1x4_tum(vm, vd, rs1, new_vl, vl); } -vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_u32m2x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_tumu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_f32mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_f32m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_f32m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_i32mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_i32m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { 
+vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_i32m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_u32mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_u32m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_u32m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x4_t test_vlseg4e32ff_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_f32mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x4_t test_vlseg4e32ff_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_f32m1x4_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m2x4_t test_vlseg4e32ff_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_f32m2x4_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x4_t test_vlseg4e32ff_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_i32mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x4_t test_vlseg4e32ff_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_i32m1x4_mu(vm, vd, rs1, new_vl, vl); } -vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m2x4_t test_vlseg4e32ff_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_i32m2x4_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x4_t test_vlseg4e32ff_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e32ff_v_u32mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t 
vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x4_t test_vlseg4e32ff_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_u32m1x4_mu(vm, vd, rs1, new_vl, vl); } -vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m2x4_t test_vlseg4e32ff_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e32ff_v_u32m2x4_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e64.c index 17525d0ba..c0047ad94 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e64.c @@ -1,103 +1,127 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tu(vfloat64m1x4_t vd, const double *rs1, size_t vl) { +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tu(vfloat64m1x4_t vd, const double *rs1, + size_t vl) { return __riscv_vlseg4e64_v_f64m1x4_tu(vd, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tu(vfloat64m2x4_t vd, const double *rs1, size_t vl) { +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tu(vfloat64m2x4_t vd, const double *rs1, + size_t vl) { return __riscv_vlseg4e64_v_f64m2x4_tu(vd, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, size_t vl) { +vint64m1x4_t test_vlseg4e64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, + size_t vl) { return __riscv_vlseg4e64_v_i64m1x4_tu(vd, rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, size_t vl) { +vint64m2x4_t test_vlseg4e64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, + size_t vl) { return __riscv_vlseg4e64_v_i64m2x4_tu(vd, rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, + size_t vl) { return __riscv_vlseg4e64_v_u64m1x4_tu(vd, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, size_t vl) { +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, + size_t vl) { return __riscv_vlseg4e64_v_u64m2x4_tu(vd, rs1, vl); } -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, size_t vl) { +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg4e64_v_f64m1x4_tum(vm, vd, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, size_t vl) { +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg4e64_v_f64m2x4_tum(vm, vd, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t vl) { +vint64m1x4_t test_vlseg4e64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg4e64_v_i64m1x4_tum(vm, vd, rs1, vl); } -vint64m2x4_t
test_vlseg4e64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t vl) { +vint64m2x4_t test_vlseg4e64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg4e64_v_i64m2x4_tum(vm, vd, rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg4e64_v_u64m1x4_tum(vm, vd, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t vl) { +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg4e64_v_u64m2x4_tum(vm, vd, rs1, vl); } -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, size_t vl) { +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg4e64_v_f64m1x4_tumu(vm, vd, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, size_t vl) { +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg4e64_v_f64m2x4_tumu(vm, vd, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t vl) { +vint64m1x4_t test_vlseg4e64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg4e64_v_i64m1x4_tumu(vm, vd, rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t vl) { +vint64m2x4_t test_vlseg4e64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg4e64_v_i64m2x4_tumu(vm, vd, rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg4e64_v_u64m1x4_tumu(vm, vd, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t vl) { +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg4e64_v_u64m2x4_tumu(vm, vd, rs1, vl); } -vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, size_t vl) { +vfloat64m1x4_t test_vlseg4e64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg4e64_v_f64m1x4_mu(vm, vd, rs1, vl); } -vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, size_t vl) { +vfloat64m2x4_t test_vlseg4e64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg4e64_v_f64m2x4_mu(vm, vd, rs1, vl); } -vint64m1x4_t test_vlseg4e64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t vl) { +vint64m1x4_t test_vlseg4e64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg4e64_v_i64m1x4_mu(vm, vd, rs1, vl); } -vint64m2x4_t test_vlseg4e64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t vl) { +vint64m2x4_t test_vlseg4e64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, size_t vl) 
{ return __riscv_vlseg4e64_v_i64m2x4_mu(vm, vd, rs1, vl); } -vuint64m1x4_t test_vlseg4e64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x4_t test_vlseg4e64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg4e64_v_u64m1x4_mu(vm, vd, rs1, vl); } -vuint64m2x4_t test_vlseg4e64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t vl) { +vuint64m2x4_t test_vlseg4e64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg4e64_v_u64m2x4_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e64ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e64ff.c index 2008337ce..04eacb04a 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e64ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e64ff.c @@ -1,103 +1,149 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tu(vfloat64m1x4_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tu(vfloat64m1x4_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_f64m1x4_tu(vd, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tu(vfloat64m2x4_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tu(vfloat64m2x4_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_f64m2x4_tu(vd, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e64ff_v_i64m1x4_tu(vd, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e64ff_v_i64m2x4_tu(vd, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tu(vuint64m1x4_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_u64m1x4_tu(vd, rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tu(vuint64m2x4_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_u64m2x4_tu(vd, rs1, new_vl, vl); } -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_f64m1x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd,
+ const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_f64m2x4_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_i64m1x4_tum(vm, vd, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_i64m2x4_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e64ff_v_u64m1x4_tum(vm, vd, rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e64ff_v_u64m2x4_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e64ff_v_f64m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e64ff_v_f64m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_i64m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_i64m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e64ff_v_u64m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e64ff_v_u64m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x4_t test_vlseg4e64ff_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x4_t 
test_vlseg4e64ff_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_f64m1x4_mu(vm, vd, rs1, new_vl, vl); } -vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m2x4_t test_vlseg4e64ff_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_f64m2x4_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x4_t test_vlseg4e64ff_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_i64m1x4_mu(vm, vd, rs1, new_vl, vl); } -vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m2x4_t test_vlseg4e64ff_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_i64m2x4_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x4_t test_vlseg4e64ff_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_u64m1x4_mu(vm, vd, rs1, new_vl, vl); } -vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m2x4_t test_vlseg4e64ff_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e64ff_v_u64m2x4_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e8.c index 30723cca6..9d61554c4 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e8.c @@ -5,162 +5,202 @@ #include <riscv_vector.h> -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg4e8_v_i8mf8x4_tu(vd, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg4e8_v_i8mf4x4_tu(vd, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg4e8_v_i8mf2x4_tu(vd, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, size_t vl) { +vint8m1x4_t test_vlseg4e8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg4e8_v_i8m1x4_tu(vd, rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, size_t vl) { +vint8m2x4_t test_vlseg4e8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg4e8_v_i8m2x4_tu(vd, rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg4e8_v_u8mf8x4_tu(vd, rs1, vl); } -vuint8mf4x4_t
test_vlseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg4e8_v_u8mf4x4_tu(vd, rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg4e8_v_u8mf2x4_tu(vd, rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg4e8_v_u8m1x4_tu(vd, rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg4e8_v_u8m2x4_tu(vd, rs1, vl); } -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8mf8x4_tum(vm, vd, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8mf4x4_tum(vm, vd, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8mf2x4_tum(vm, vd, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t vl) { +vint8m1x4_t test_vlseg4e8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8m1x4_tum(vm, vd, rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t vl) { +vint8m2x4_t test_vlseg4e8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8m2x4_tum(vm, vd, rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8mf8x4_tum(vm, vd, rs1, vl); } -vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8mf4x4_tum(vm, vd, rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8mf2x4_tum(vm, vd, rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8m1x4_tum(vm, vd, rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t 
*rs1, size_t vl) { return __riscv_vlseg4e8_v_u8m2x4_tum(vm, vd, rs1, vl); } -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8mf8x4_tumu(vm, vd, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8mf4x4_tumu(vm, vd, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8mf2x4_tumu(vm, vd, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t vl) { +vint8m1x4_t test_vlseg4e8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8m1x4_tumu(vm, vd, rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t vl) { +vint8m2x4_t test_vlseg4e8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8m2x4_tumu(vm, vd, rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8mf8x4_tumu(vm, vd, rs1, vl); } -vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8mf4x4_tumu(vm, vd, rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8mf2x4_tumu(vm, vd, rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x4_t test_vlseg4e8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8m1x4_tumu(vm, vd, rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2x4_t test_vlseg4e8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8m2x4_tumu(vm, vd, rs1, vl); } -vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x4_t test_vlseg4e8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8mf8x4_mu(vm, vd, rs1, vl); } -vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x4_t test_vlseg4e8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8mf4x4_mu(vm, vd, rs1, vl); } -vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x4_t test_vlseg4e8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, size_t vl) { return 
__riscv_vlseg4e8_v_i8mf2x4_mu(vm, vd, rs1, vl); } -vint8m1x4_t test_vlseg4e8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t vl) { +vint8m1x4_t test_vlseg4e8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8m1x4_mu(vm, vd, rs1, vl); } -vint8m2x4_t test_vlseg4e8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t vl) { +vint8m2x4_t test_vlseg4e8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_i8m2x4_mu(vm, vd, rs1, vl); } -vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x4_t test_vlseg4e8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8mf8x4_mu(vm, vd, rs1, vl); } -vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x4_t test_vlseg4e8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8mf4x4_mu(vm, vd, rs1, vl); } -vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x4_t test_vlseg4e8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8mf2x4_mu(vm, vd, rs1, vl); } -vuint8m1x4_t test_vlseg4e8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x4_t test_vlseg4e8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8m1x4_mu(vm, vd, rs1, vl); } -vuint8m2x4_t test_vlseg4e8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t vl) { +vuint8m2x4_t test_vlseg4e8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg4e8_v_u8m2x4_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e8ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e8ff.c index d803d3f98..e2d3a6ecd 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg4e8ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg4e8ff.c @@ -1,167 +1,237 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e8ff_v_i8mf8x4_tu(vd, rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e8ff_v_i8mf4x4_tu(vd, rs1, new_vl, vl); } -vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e8ff_v_i8mf2x4_tu(vd, rs1, new_vl, vl); } -vint8m1x4_t
test_vlseg4e8ff_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e8ff_v_i8m1x4_tu(vd, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e8ff_v_i8m2x4_tu(vd, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e8ff_v_u8mf8x4_tu(vd, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e8ff_v_u8mf4x4_tu(vd, rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e8ff_v_u8mf2x4_tu(vd, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e8ff_v_u8m1x4_tu(vd, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg4e8ff_v_u8m2x4_tu(vd, rs1, new_vl, vl); } -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8mf8x4_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8mf4x4_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8m1x4_tum(vm, vd, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8m2x4_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { 
+vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8mf8x4_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8mf4x4_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8mf2x4_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8m1x4_tum(vm, vd, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8m2x4_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8mf8x4_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8mf4x4_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8mf8x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x4_t 
test_vlseg4e8ff_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8mf4x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8mf2x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8m1x4_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8m2x4_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x4_t test_vlseg4e8ff_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8mf8x4_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x4_t test_vlseg4e8ff_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8mf4x4_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x4_t test_vlseg4e8ff_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x4_t test_vlseg4e8ff_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8m1x4_mu(vm, vd, rs1, new_vl, vl); } -vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m2x4_t test_vlseg4e8ff_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_i8m2x4_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x4_t test_vlseg4e8ff_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8mf8x4_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x4_t test_vlseg4e8ff_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8mf4x4_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x4_t test_vlseg4e8ff_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, + 
const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8mf2x4_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x4_t test_vlseg4e8ff_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8m1x4_mu(vm, vd, rs1, new_vl, vl); } -vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m2x4_t test_vlseg4e8ff_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg4e8ff_v_u8m2x4_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e16.c index e599e9716..ba31772b7 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e16.c @@ -1,151 +1,187 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg5e16_v_f16mf4x5_tu(vd, rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg5e16_v_f16mf2x5_tu(vd, rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tu(vfloat16m1x5_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tu(vfloat16m1x5_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg5e16_v_f16m1x5_tu(vd, rs1, vl); } -vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg5e16_v_i16mf4x5_tu(vd, rs1, vl); } -vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg5e16_v_i16mf2x5_tu(vd, rs1, vl); } -vint16m1x5_t test_vlseg5e16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, size_t vl) { +vint16m1x5_t test_vlseg5e16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg5e16_v_i16m1x5_tu(vd, rs1, vl); } -vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_u16mf4x5_tu(vd, rs1, vl); } -vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_u16mf2x5_tu(vd, rs1, vl); } -vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tu(vuint16m1x5_t vd, const
uint16_t *rs1, + size_t vl) { return __riscv_vlseg5e16_v_u16m1x5_tu(vd, rs1, vl); } -vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg5e16_v_f16mf4x5_tum(vm, vd, rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg5e16_v_f16mf2x5_tum(vm, vd, rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg5e16_v_f16m1x5_tum(vm, vd, rs1, vl); } -vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_i16mf4x5_tum(vm, vd, rs1, vl); } -vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_i16mf2x5_tum(vm, vd, rs1, vl); } -vint16m1x5_t test_vlseg5e16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t vl) { +vint16m1x5_t test_vlseg5e16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_i16m1x5_tum(vm, vd, rs1, vl); } -vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_u16mf4x5_tum(vm, vd, rs1, vl); } -vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_u16mf2x5_tum(vm, vd, rs1, vl); } -vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_u16m1x5_tum(vm, vd, rs1, vl); } -vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg5e16_v_f16mf4x5_tumu(vm, vd, rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg5e16_v_f16mf2x5_tumu(vm, vd, rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg5e16_v_f16m1x5_tumu(vm, vd, rs1, vl); } -vint16mf4x5_t 
test_vlseg5e16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_i16mf4x5_tumu(vm, vd, rs1, vl); } -vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_i16mf2x5_tumu(vm, vd, rs1, vl); } -vint16m1x5_t test_vlseg5e16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t vl) { +vint16m1x5_t test_vlseg5e16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_i16m1x5_tumu(vm, vd, rs1, vl); } -vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_u16mf4x5_tumu(vm, vd, rs1, vl); } -vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_u16mf2x5_tumu(vm, vd, rs1, vl); } -vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x5_t test_vlseg5e16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_u16m1x5_tumu(vm, vd, rs1, vl); } -vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x5_t test_vlseg5e16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg5e16_v_f16mf4x5_mu(vm, vd, rs1, vl); } -vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x5_t test_vlseg5e16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg5e16_v_f16mf2x5_mu(vm, vd, rs1, vl); } -vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x5_t test_vlseg5e16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg5e16_v_f16m1x5_mu(vm, vd, rs1, vl); } -vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x5_t test_vlseg5e16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_i16mf4x5_mu(vm, vd, rs1, vl); } -vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x5_t test_vlseg5e16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_i16mf2x5_mu(vm, vd, rs1, vl); } -vint16m1x5_t test_vlseg5e16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t vl) { +vint16m1x5_t test_vlseg5e16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_i16m1x5_mu(vm, vd, rs1, vl); } -vuint16mf4x5_t test_vlseg5e16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x5_t 
test_vlseg5e16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_u16mf4x5_mu(vm, vd, rs1, vl); } -vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x5_t test_vlseg5e16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_u16mf2x5_mu(vm, vd, rs1, vl); } -vuint16m1x5_t test_vlseg5e16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x5_t test_vlseg5e16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg5e16_v_u16m1x5_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e16ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e16ff.c index 139cd019c..238570b7a 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e16ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e16ff.c @@ -1,151 +1,226 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tu(vfloat16mf4x5_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_f16mf4x5_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tu(vfloat16mf2x5_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_f16mf2x5_tu(vd, rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tu(vfloat16m1x5_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tu(vfloat16m1x5_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_f16m1x5_tu(vd, rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tu(vint16mf4x5_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e16ff_v_i16mf4x5_tu(vd, rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tu(vint16mf2x5_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e16ff_v_i16mf2x5_tu(vd, rs1, new_vl, vl); } -vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_i16m1x5_tu(vd, rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tu(vuint16mf4x5_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_u16mf4x5_tu(vd, rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, size_t
*new_vl, size_t vl) { +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tu(vuint16mf2x5_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_u16mf2x5_tu(vd, rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tu(vuint16m1x5_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e16ff_v_u16m1x5_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tum(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_f16mf4x5_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tum(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_f16mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_f16m1x5_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_i16mf4x5_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_i16mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e16ff_v_i16m1x5_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_u16mf4x5_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_u16mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_u16m1x5_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x5_t 
test_vlseg5e16ff_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_tumu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_f16mf4x5_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_tumu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_f16mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_f16m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_i16mf4x5_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_i16mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e16ff_v_i16m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_u16mf4x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_u16mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_u16m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x5_t test_vlseg5e16ff_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_f16mf4x5_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x5_t test_vlseg5e16ff_v_f16mf2x5_mu(vbool32_t vm, 
vfloat16mf2x5_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_f16mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x5_t test_vlseg5e16ff_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_f16m1x5_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x5_t test_vlseg5e16ff_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e16ff_v_i16mf4x5_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x5_t test_vlseg5e16ff_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e16ff_v_i16mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x5_t test_vlseg5e16ff_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e16ff_v_i16m1x5_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x5_t test_vlseg5e16ff_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_u16mf4x5_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x5_t test_vlseg5e16ff_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e16ff_v_u16mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x5_t test_vlseg5e16ff_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e16ff_v_u16m1x5_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e32.c index 30bd69b28..ff7ae0c39 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e32.c @@ -1,103 +1,127 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float *rs1, size_t vl) { +vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg5e32_v_f32mf2x5_tu(vd, rs1, vl); } -vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, size_t vl) { +vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, + size_t vl) { return
__riscv_vlseg5e32_v_f32m1x5_tu(vd, rs1, vl); } -vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg5e32_v_i32mf2x5_tu(vd, rs1, vl); } -vint32m1x5_t test_vlseg5e32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, size_t vl) { +vint32m1x5_t test_vlseg5e32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg5e32_v_i32m1x5_tu(vd, rs1, vl); } -vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg5e32_v_u32mf2x5_tu(vd, rs1, vl); } -vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, + size_t vl) { return __riscv_vlseg5e32_v_u32m1x5_tu(vd, rs1, vl); } -vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, size_t vl) { +vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg5e32_v_f32mf2x5_tum(vm, vd, rs1, vl); } -vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, size_t vl) { +vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg5e32_v_f32m1x5_tum(vm, vd, rs1, vl); } -vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg5e32_v_i32mf2x5_tum(vm, vd, rs1, vl); } -vint32m1x5_t test_vlseg5e32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t vl) { +vint32m1x5_t test_vlseg5e32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg5e32_v_i32m1x5_tum(vm, vd, rs1, vl); } -vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg5e32_v_u32mf2x5_tum(vm, vd, rs1, vl); } -vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg5e32_v_u32m1x5_tum(vm, vd, rs1, vl); } -vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, size_t vl) { +vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg5e32_v_f32mf2x5_tumu(vm, vd, rs1, vl); } -vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, size_t vl) { +vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg5e32_v_f32m1x5_tumu(vm, vd, rs1, vl); } -vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, size_t vl) { return 
__riscv_vlseg5e32_v_i32mf2x5_tumu(vm, vd, rs1, vl); } -vint32m1x5_t test_vlseg5e32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t vl) { +vint32m1x5_t test_vlseg5e32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg5e32_v_i32m1x5_tumu(vm, vd, rs1, vl); } -vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg5e32_v_u32mf2x5_tumu(vm, vd, rs1, vl); } -vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x5_t test_vlseg5e32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg5e32_v_u32m1x5_tumu(vm, vd, rs1, vl); } -vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, size_t vl) { +vfloat32mf2x5_t test_vlseg5e32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg5e32_v_f32mf2x5_mu(vm, vd, rs1, vl); } -vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, size_t vl) { +vfloat32m1x5_t test_vlseg5e32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg5e32_v_f32m1x5_mu(vm, vd, rs1, vl); } -vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x5_t test_vlseg5e32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg5e32_v_i32mf2x5_mu(vm, vd, rs1, vl); } -vint32m1x5_t test_vlseg5e32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t vl) { +vint32m1x5_t test_vlseg5e32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg5e32_v_i32m1x5_mu(vm, vd, rs1, vl); } -vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x5_t test_vlseg5e32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg5e32_v_u32mf2x5_mu(vm, vd, rs1, vl); } -vuint32m1x5_t test_vlseg5e32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x5_t test_vlseg5e32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg5e32_v_u32m1x5_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e32ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e32ff.c index 6bc392ac7..416d00030 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e32ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e32ff.c @@ -1,103 +1,152 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tu(vfloat32mf2x5_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_f32mf2x5_tu(vd, rs1, new_vl, vl); } -vfloat32m1x5_t
test_vlseg5e32ff_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tu(vfloat32m1x5_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_f32m1x5_tu(vd, rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tu(vint32mf2x5_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_i32mf2x5_tu(vd, rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e32ff_v_i32m1x5_tu(vd, rs1, new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tu(vuint32mf2x5_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e32ff_v_u32mf2x5_tu(vd, rs1, new_vl, vl); } -vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tu(vuint32m1x5_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_u32m1x5_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tum(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e32ff_v_f32mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_f32m1x5_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e32ff_v_i32mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_i32m1x5_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e32ff_v_u32mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e32ff_v_u32m1x5_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, size_t *new_vl, 
size_t vl) { +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_tumu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e32ff_v_f32mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_f32m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e32ff_v_i32mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_i32m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e32ff_v_u32mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e32ff_v_u32m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x5_t test_vlseg5e32ff_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_f32mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x5_t test_vlseg5e32ff_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_f32m1x5_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x5_t test_vlseg5e32ff_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_i32mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x5_t test_vlseg5e32ff_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_i32m1x5_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x5_t test_vlseg5e32ff_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e32ff_v_u32mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x5_t 
test_vlseg5e32ff_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x5_t test_vlseg5e32ff_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e32ff_v_u32m1x5_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e64.c index 3b47cf95d..0e6defe74 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e64.c @@ -1,55 +1,67 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tu(vfloat64m1x5_t vd, const double *rs1, size_t vl) { +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tu(vfloat64m1x5_t vd, const double *rs1, + size_t vl) { return __riscv_vlseg5e64_v_f64m1x5_tu(vd, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, size_t vl) { +vint64m1x5_t test_vlseg5e64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, + size_t vl) { return __riscv_vlseg5e64_v_i64m1x5_tu(vd, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, + size_t vl) { return __riscv_vlseg5e64_v_u64m1x5_tu(vd, rs1, vl); } -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, size_t vl) { +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg5e64_v_f64m1x5_tum(vm, vd, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t vl) { +vint64m1x5_t test_vlseg5e64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg5e64_v_i64m1x5_tum(vm, vd, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg5e64_v_u64m1x5_tum(vm, vd, rs1, vl); } -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, size_t vl) { +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg5e64_v_f64m1x5_tumu(vm, vd, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t vl) { +vint64m1x5_t test_vlseg5e64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg5e64_v_i64m1x5_tumu(vm, vd, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg5e64_v_u64m1x5_tumu(vm, vd, rs1, vl); } -vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, size_t vl) { +vfloat64m1x5_t test_vlseg5e64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t
vd, + const double *rs1, size_t vl) { return __riscv_vlseg5e64_v_f64m1x5_mu(vm, vd, rs1, vl); } -vint64m1x5_t test_vlseg5e64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t vl) { +vint64m1x5_t test_vlseg5e64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg5e64_v_i64m1x5_mu(vm, vd, rs1, vl); } -vuint64m1x5_t test_vlseg5e64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x5_t test_vlseg5e64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg5e64_v_u64m1x5_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e64ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e64ff.c index ffe2a4385..c8db7a68b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e64ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e64ff.c @@ -1,55 +1,78 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tu(vfloat64m1x5_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tu(vfloat64m1x5_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e64ff_v_f64m1x5_tu(vd, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e64ff_v_i64m1x5_tu(vd, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tu(vuint64m1x5_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e64ff_v_u64m1x5_tu(vd, rs1, new_vl, vl); } -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e64ff_v_f64m1x5_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e64ff_v_i64m1x5_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e64ff_v_u64m1x5_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e64ff_v_f64m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tumu(vbool64_t vm,
vint64m1x5_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e64ff_v_i64m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e64ff_v_u64m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x5_t test_vlseg5e64ff_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e64ff_v_f64m1x5_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x5_t test_vlseg5e64ff_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e64ff_v_i64m1x5_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x5_t test_vlseg5e64ff_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e64ff_v_u64m1x5_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e8.c index df5401ab9..c1dd72f72 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e8.c @@ -5,130 +5,162 @@ #include <riscv_vector.h> -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg5e8_v_i8mf8x5_tu(vd, rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg5e8_v_i8mf4x5_tu(vd, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg5e8_v_i8mf2x5_tu(vd, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, size_t vl) { +vint8m1x5_t test_vlseg5e8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg5e8_v_i8m1x5_tu(vd, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg5e8_v_u8mf8x5_tu(vd, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg5e8_v_u8mf4x5_tu(vd, rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg5e8_v_u8mf2x5_tu(vd, rs1, vl); } -vuint8m1x5_t
test_vlseg5e8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg5e8_v_u8m1x5_tu(vd, rs1, vl); } -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_i8mf8x5_tum(vm, vd, rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_i8mf4x5_tum(vm, vd, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_i8mf2x5_tum(vm, vd, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t vl) { +vint8m1x5_t test_vlseg5e8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_i8m1x5_tum(vm, vd, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_u8mf8x5_tum(vm, vd, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_u8mf4x5_tum(vm, vd, rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_u8mf2x5_tum(vm, vd, rs1, vl); } -vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_u8m1x5_tum(vm, vd, rs1, vl); } -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_i8mf8x5_tumu(vm, vd, rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_i8mf4x5_tumu(vm, vd, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_i8mf2x5_tumu(vm, vd, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t vl) { +vint8m1x5_t test_vlseg5e8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_i8m1x5_tumu(vm, vd, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const 
uint8_t *rs1, size_t vl) { +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_u8mf8x5_tumu(vm, vd, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_u8mf4x5_tumu(vm, vd, rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_u8mf2x5_tumu(vm, vd, rs1, vl); } -vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_u8m1x5_tumu(vm, vd, rs1, vl); } -vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x5_t test_vlseg5e8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_i8mf8x5_mu(vm, vd, rs1, vl); } -vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x5_t test_vlseg5e8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_i8mf4x5_mu(vm, vd, rs1, vl); } -vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x5_t test_vlseg5e8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_i8mf2x5_mu(vm, vd, rs1, vl); } -vint8m1x5_t test_vlseg5e8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t vl) { +vint8m1x5_t test_vlseg5e8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_i8m1x5_mu(vm, vd, rs1, vl); } -vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x5_t test_vlseg5e8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_u8mf8x5_mu(vm, vd, rs1, vl); } -vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x5_t test_vlseg5e8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_u8mf4x5_mu(vm, vd, rs1, vl); } -vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x5_t test_vlseg5e8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_u8mf2x5_mu(vm, vd, rs1, vl); } -vuint8m1x5_t test_vlseg5e8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x5_t test_vlseg5e8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg5e8_v_u8m1x5_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e8ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e8ff.c index 82795a14b..419500c2d 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg5e8ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg5e8ff.c @@ -1,135 +1,191 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 
-target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e8ff_v_i8mf8x5_tu(vd, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e8ff_v_i8mf4x5_tu(vd, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e8ff_v_i8mf2x5_tu(vd, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e8ff_v_i8m1x5_tu(vd, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e8ff_v_u8mf8x5_tu(vd, rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e8ff_v_u8mf4x5_tu(vd, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e8ff_v_u8mf2x5_tu(vd, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg5e8ff_v_u8m1x5_tu(vd, rs1, new_vl, vl); } -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_i8mf8x5_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_i8mf4x5_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_i8mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t
vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_i8m1x5_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_u8mf8x5_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_u8mf4x5_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_u8mf2x5_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_u8m1x5_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_i8mf8x5_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_i8mf4x5_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_i8mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_i8m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_u8mf8x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_u8mf4x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t 
*rs1, size_t *new_vl, size_t vl) { +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_u8mf2x5_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_u8m1x5_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x5_t test_vlseg5e8ff_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_i8mf8x5_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x5_t test_vlseg5e8ff_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_i8mf4x5_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x5_t test_vlseg5e8ff_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_i8mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x5_t test_vlseg5e8ff_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_i8m1x5_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x5_t test_vlseg5e8ff_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_u8mf8x5_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x5_t test_vlseg5e8ff_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_u8mf4x5_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x5_t test_vlseg5e8ff_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_u8mf2x5_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x5_t test_vlseg5e8ff_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg5e8ff_v_u8m1x5_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e16.c index 83a8e381b..a91ad7254 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e16.c @@ -1,151 +1,187 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh 
-disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg6e16_v_f16mf4x6_tu(vd, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg6e16_v_f16mf2x6_tu(vd, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tu(vfloat16m1x6_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tu(vfloat16m1x6_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg6e16_v_f16m1x6_tu(vd, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg6e16_v_i16mf4x6_tu(vd, rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg6e16_v_i16mf2x6_tu(vd, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, size_t vl) { +vint16m1x6_t test_vlseg6e16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg6e16_v_i16m1x6_tu(vd, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_u16mf4x6_tu(vd, rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_u16mf2x6_tu(vd, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vlseg6e16_v_u16m1x6_tu(vd, rs1, vl); } -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg6e16_v_f16mf4x6_tum(vm, vd, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg6e16_v_f16mf2x6_tum(vm, vd, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg6e16_v_f16m1x6_tum(vm, vd, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, size_t vl) { return
__riscv_vlseg6e16_v_i16mf4x6_tum(vm, vd, rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_i16mf2x6_tum(vm, vd, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t vl) { +vint16m1x6_t test_vlseg6e16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_i16m1x6_tum(vm, vd, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_u16mf4x6_tum(vm, vd, rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_u16mf2x6_tum(vm, vd, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_u16m1x6_tum(vm, vd, rs1, vl); } -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg6e16_v_f16mf4x6_tumu(vm, vd, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg6e16_v_f16mf2x6_tumu(vm, vd, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg6e16_v_f16m1x6_tumu(vm, vd, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_i16mf4x6_tumu(vm, vd, rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_i16mf2x6_tumu(vm, vd, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t vl) { +vint16m1x6_t test_vlseg6e16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_i16m1x6_tumu(vm, vd, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_u16mf4x6_tumu(vm, vd, rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tumu(vbool32_t vm, 
vuint16mf2x6_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_u16mf2x6_tumu(vm, vd, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_u16m1x6_tumu(vm, vd, rs1, vl); } -vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x6_t test_vlseg6e16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg6e16_v_f16mf4x6_mu(vm, vd, rs1, vl); } -vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x6_t test_vlseg6e16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg6e16_v_f16mf2x6_mu(vm, vd, rs1, vl); } -vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x6_t test_vlseg6e16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg6e16_v_f16m1x6_mu(vm, vd, rs1, vl); } -vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x6_t test_vlseg6e16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_i16mf4x6_mu(vm, vd, rs1, vl); } -vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x6_t test_vlseg6e16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_i16mf2x6_mu(vm, vd, rs1, vl); } -vint16m1x6_t test_vlseg6e16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t vl) { +vint16m1x6_t test_vlseg6e16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_i16m1x6_mu(vm, vd, rs1, vl); } -vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x6_t test_vlseg6e16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_u16mf4x6_mu(vm, vd, rs1, vl); } -vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x6_t test_vlseg6e16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_u16mf2x6_mu(vm, vd, rs1, vl); } -vuint16m1x6_t test_vlseg6e16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x6_t test_vlseg6e16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg6e16_v_u16m1x6_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e16ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e16ff.c index e67387d5b..e79ca2fed 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e16ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e16ff.c @@ -1,151 +1,226 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: 
-target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tu(vfloat16mf4x6_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_f16mf4x6_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tu(vfloat16mf2x6_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_f16mf2x6_tu(vd, rs1, new_vl, vl); } -vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tu(vfloat16m1x6_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tu(vfloat16m1x6_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_f16m1x6_tu(vd, rs1, new_vl, vl); } -vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tu(vint16mf4x6_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e16ff_v_i16mf4x6_tu(vd, rs1, new_vl, vl); } -vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tu(vint16mf2x6_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e16ff_v_i16mf2x6_tu(vd, rs1, new_vl, vl); } -vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_i16m1x6_tu(vd, rs1, new_vl, vl); } -vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tu(vuint16mf4x6_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_u16mf4x6_tu(vd, rs1, new_vl, vl); } -vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tu(vuint16mf2x6_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_u16mf2x6_tu(vd, rs1, new_vl, vl); } -vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tu(vuint16m1x6_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e16ff_v_u16m1x6_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tum(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_f16mf4x6_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tum(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_f16mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x6_t
test_vlseg6e16ff_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_f16m1x6_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_i16mf4x6_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_i16mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e16ff_v_i16m1x6_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_u16mf4x6_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_u16mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_u16m1x6_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_tumu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_f16mf4x6_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_tumu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_f16mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_f16m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t 
*rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_i16mf4x6_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_i16mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e16ff_v_i16m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_u16mf4x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_u16mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_u16m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x6_t test_vlseg6e16ff_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_f16mf4x6_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x6_t test_vlseg6e16ff_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_f16mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x6_t test_vlseg6e16ff_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_f16m1x6_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x6_t test_vlseg6e16ff_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e16ff_v_i16mf4x6_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x6_t test_vlseg6e16ff_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e16ff_v_i16mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, size_t *new_vl, size_t 
vl) { +vint16m1x6_t test_vlseg6e16ff_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e16ff_v_i16m1x6_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x6_t test_vlseg6e16ff_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_u16mf4x6_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x6_t test_vlseg6e16ff_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e16ff_v_u16mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x6_t test_vlseg6e16ff_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e16ff_v_u16m1x6_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e32.c index 1f30114f5..ed0dc5cd7 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e32.c @@ -1,103 +1,127 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float *rs1, size_t vl) { +vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg6e32_v_f32mf2x6_tu(vd, rs1, vl); } -vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, size_t vl) { +vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, + size_t vl) { return __riscv_vlseg6e32_v_f32m1x6_tu(vd, rs1, vl); } -vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg6e32_v_i32mf2x6_tu(vd, rs1, vl); } -vint32m1x6_t test_vlseg6e32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, size_t vl) { +vint32m1x6_t test_vlseg6e32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg6e32_v_i32m1x6_tu(vd, rs1, vl); } -vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_u32mf2x6_tu(vd, rs1, vl); } -vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, + size_t vl) { return __riscv_vlseg6e32_v_u32m1x6_tu(vd, rs1, vl); } -vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, size_t vl) { +vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, + const float *rs1, size_t vl) { return
__riscv_vlseg6e32_v_f32mf2x6_tum(vm, vd, rs1, vl); } -vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, size_t vl) { +vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg6e32_v_f32m1x6_tum(vm, vd, rs1, vl); } -vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_i32mf2x6_tum(vm, vd, rs1, vl); } -vint32m1x6_t test_vlseg6e32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t vl) { +vint32m1x6_t test_vlseg6e32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_i32m1x6_tum(vm, vd, rs1, vl); } -vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_u32mf2x6_tum(vm, vd, rs1, vl); } -vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_u32m1x6_tum(vm, vd, rs1, vl); } -vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, size_t vl) { +vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg6e32_v_f32mf2x6_tumu(vm, vd, rs1, vl); } -vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, size_t vl) { +vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg6e32_v_f32m1x6_tumu(vm, vd, rs1, vl); } -vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_i32mf2x6_tumu(vm, vd, rs1, vl); } -vint32m1x6_t test_vlseg6e32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t vl) { +vint32m1x6_t test_vlseg6e32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_i32m1x6_tumu(vm, vd, rs1, vl); } -vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_u32mf2x6_tumu(vm, vd, rs1, vl); } -vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x6_t test_vlseg6e32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_u32m1x6_tumu(vm, vd, rs1, vl); } -vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, size_t vl) { +vfloat32mf2x6_t test_vlseg6e32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg6e32_v_f32mf2x6_mu(vm, vd, rs1, vl); } -vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, size_t 
vl) { +vfloat32m1x6_t test_vlseg6e32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg6e32_v_f32m1x6_mu(vm, vd, rs1, vl); } -vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x6_t test_vlseg6e32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_i32mf2x6_mu(vm, vd, rs1, vl); } -vint32m1x6_t test_vlseg6e32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t vl) { +vint32m1x6_t test_vlseg6e32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_i32m1x6_mu(vm, vd, rs1, vl); } -vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x6_t test_vlseg6e32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_u32mf2x6_mu(vm, vd, rs1, vl); } -vuint32m1x6_t test_vlseg6e32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x6_t test_vlseg6e32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg6e32_v_u32m1x6_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e32ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e32ff.c index 3a6e0ba3e..6b0bf3b27 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e32ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e32ff.c @@ -1,103 +1,152 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tu(vfloat32mf2x6_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_f32mf2x6_tu(vd, rs1, new_vl, vl); } -vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tu(vfloat32m1x6_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_f32m1x6_tu(vd, rs1, new_vl, vl); } -vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tu(vint32mf2x6_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_i32mf2x6_tu(vd, rs1, new_vl, vl); } -vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e32ff_v_i32m1x6_tu(vd, rs1, new_vl, vl); } -vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tu(vuint32mf2x6_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e32ff_v_u32mf2x6_tu(vd, rs1, new_vl, vl); } -vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x6_t
test_vlseg6e32ff_v_u32m1x6_tu(vuint32m1x6_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_u32m1x6_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tum(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e32ff_v_f32mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_f32m1x6_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e32ff_v_i32mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_i32m1x6_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e32ff_v_u32mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e32ff_v_u32m1x6_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_tumu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e32ff_v_f32mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_f32m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e32ff_v_i32mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_i32m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t 
vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e32ff_v_u32mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e32ff_v_u32m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x6_t test_vlseg6e32ff_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_f32mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x6_t test_vlseg6e32ff_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_f32m1x6_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x6_t test_vlseg6e32ff_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_i32mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x6_t test_vlseg6e32ff_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_i32m1x6_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x6_t test_vlseg6e32ff_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e32ff_v_u32mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x6_t test_vlseg6e32ff_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e32ff_v_u32m1x6_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e64.c index 54e1ab8df..33314bb1e 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e64.c @@ -1,55 +1,67 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tu(vfloat64m1x6_t vd, const double *rs1, size_t vl) { +vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tu(vfloat64m1x6_t vd, const double *rs1, + size_t vl) { return __riscv_vlseg6e64_v_f64m1x6_tu(vd, rs1, vl); } -vint64m1x6_t
test_vlseg6e64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, + size_t vl) { return __riscv_vlseg6e64_v_i64m1x6_tu(vd, rs1, vl); } -vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, + size_t vl) { return __riscv_vlseg6e64_v_u64m1x6_tu(vd, rs1, vl); } -vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, size_t vl) { +vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg6e64_v_f64m1x6_tum(vm, vd, rs1, vl); } -vint64m1x6_t test_vlseg6e64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t vl) { +vint64m1x6_t test_vlseg6e64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg6e64_v_i64m1x6_tum(vm, vd, rs1, vl); } -vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg6e64_v_u64m1x6_tum(vm, vd, rs1, vl); } -vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, size_t vl) { +vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg6e64_v_f64m1x6_tumu(vm, vd, rs1, vl); } -vint64m1x6_t test_vlseg6e64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t vl) { +vint64m1x6_t test_vlseg6e64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg6e64_v_i64m1x6_tumu(vm, vd, rs1, vl); } -vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x6_t test_vlseg6e64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg6e64_v_u64m1x6_tumu(vm, vd, rs1, vl); } -vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, size_t vl) { +vfloat64m1x6_t test_vlseg6e64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg6e64_v_f64m1x6_mu(vm, vd, rs1, vl); } -vint64m1x6_t test_vlseg6e64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t vl) { +vint64m1x6_t test_vlseg6e64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg6e64_v_i64m1x6_mu(vm, vd, rs1, vl); } -vuint64m1x6_t test_vlseg6e64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x6_t test_vlseg6e64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg6e64_v_u64m1x6_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e64ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e64ff.c index 35092e288..75e9dd0f7 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e64ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e64ff.c @@ -1,55 +1,78 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h>
-vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tu(vfloat64m1x6_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tu(vfloat64m1x6_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e64ff_v_f64m1x6_tu(vd, rs1, new_vl, vl); } -vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e64ff_v_i64m1x6_tu(vd, rs1, new_vl, vl); } -vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tu(vuint64m1x6_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e64ff_v_u64m1x6_tu(vd, rs1, new_vl, vl); } -vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e64ff_v_f64m1x6_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e64ff_v_i64m1x6_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e64ff_v_u64m1x6_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e64ff_v_f64m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e64ff_v_i64m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e64ff_v_u64m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x6_t test_vlseg6e64ff_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e64ff_v_f64m1x6_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x6_t test_vlseg6e64ff_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e64ff_v_i64m1x6_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_mu(vbool64_t vm, 
vuint64m1x6_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x6_t test_vlseg6e64ff_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e64ff_v_u64m1x6_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e8.c index 3185f1f23..5f892dc0a 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e8.c @@ -5,130 +5,162 @@ #include <riscv_vector.h> -vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg6e8_v_i8mf8x6_tu(vd, rs1, vl); } -vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg6e8_v_i8mf4x6_tu(vd, rs1, vl); } -vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg6e8_v_i8mf2x6_tu(vd, rs1, vl); } -vint8m1x6_t test_vlseg6e8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, size_t vl) { +vint8m1x6_t test_vlseg6e8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg6e8_v_i8m1x6_tu(vd, rs1, vl); } -vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg6e8_v_u8mf8x6_tu(vd, rs1, vl); } -vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg6e8_v_u8mf4x6_tu(vd, rs1, vl); } -vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg6e8_v_u8mf2x6_tu(vd, rs1, vl); } -vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg6e8_v_u8m1x6_tu(vd, rs1, vl); } -vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_i8mf8x6_tum(vm, vd, rs1, vl); } -vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_i8mf4x6_tum(vm, vd, rs1, vl); } -vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_i8mf2x6_tum(vm, vd, rs1, vl); } -vint8m1x6_t test_vlseg6e8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t vl) { +vint8m1x6_t test_vlseg6e8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_i8m1x6_tum(vm, vd, rs1, vl); } -vuint8mf8x6_t
test_vlseg6e8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_u8mf8x6_tum(vm, vd, rs1, vl); } -vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_u8mf4x6_tum(vm, vd, rs1, vl); } -vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_u8mf2x6_tum(vm, vd, rs1, vl); } -vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_u8m1x6_tum(vm, vd, rs1, vl); } -vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_i8mf8x6_tumu(vm, vd, rs1, vl); } -vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_i8mf4x6_tumu(vm, vd, rs1, vl); } -vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_i8mf2x6_tumu(vm, vd, rs1, vl); } -vint8m1x6_t test_vlseg6e8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t vl) { +vint8m1x6_t test_vlseg6e8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_i8m1x6_tumu(vm, vd, rs1, vl); } -vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_u8mf8x6_tumu(vm, vd, rs1, vl); } -vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_u8mf4x6_tumu(vm, vd, rs1, vl); } -vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_u8mf2x6_tumu(vm, vd, rs1, vl); } -vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x6_t test_vlseg6e8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_u8m1x6_tumu(vm, vd, rs1, vl); } -vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x6_t test_vlseg6e8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_i8mf8x6_mu(vm, vd, rs1, vl); } -vint8mf4x6_t 
test_vlseg6e8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x6_t test_vlseg6e8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_i8mf4x6_mu(vm, vd, rs1, vl); } -vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x6_t test_vlseg6e8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_i8mf2x6_mu(vm, vd, rs1, vl); } -vint8m1x6_t test_vlseg6e8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t vl) { +vint8m1x6_t test_vlseg6e8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_i8m1x6_mu(vm, vd, rs1, vl); } -vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x6_t test_vlseg6e8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_u8mf8x6_mu(vm, vd, rs1, vl); } -vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x6_t test_vlseg6e8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_u8mf4x6_mu(vm, vd, rs1, vl); } -vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x6_t test_vlseg6e8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_u8mf2x6_mu(vm, vd, rs1, vl); } -vuint8m1x6_t test_vlseg6e8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x6_t test_vlseg6e8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg6e8_v_u8m1x6_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e8ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e8ff.c index 38a87f33e..55995c32f 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg6e8ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg6e8ff.c @@ -1,135 +1,191 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e8ff_v_i8mf8x6_tu(vd, rs1, new_vl, vl); } -vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e8ff_v_i8mf4x6_tu(vd, rs1, new_vl, vl); } -vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e8ff_v_i8mf2x6_tu(vd, rs1, new_vl, vl); } -vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, + size_t *new_vl,
size_t vl) { return __riscv_vlseg6e8ff_v_i8m1x6_tu(vd, rs1, new_vl, vl); } -vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e8ff_v_u8mf8x6_tu(vd, rs1, new_vl, vl); } -vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e8ff_v_u8mf4x6_tu(vd, rs1, new_vl, vl); } -vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e8ff_v_u8mf2x6_tu(vd, rs1, new_vl, vl); } -vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg6e8ff_v_u8m1x6_tu(vd, rs1, new_vl, vl); } -vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_i8mf8x6_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_i8mf4x6_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_i8mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_i8m1x6_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_u8mf8x6_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_u8mf4x6_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_u8mf2x6_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t 
*new_vl, size_t vl) { +vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_u8m1x6_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_i8mf8x6_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_i8mf4x6_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_i8mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_i8m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_u8mf8x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_u8mf4x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_u8mf2x6_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_u8m1x6_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x6_t test_vlseg6e8ff_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_i8mf8x6_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x6_t test_vlseg6e8ff_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_i8mf4x6_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x6_t test_vlseg6e8ff_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x6_t 
test_vlseg6e8ff_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_i8mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x6_t test_vlseg6e8ff_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_i8m1x6_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x6_t test_vlseg6e8ff_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_u8mf8x6_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x6_t test_vlseg6e8ff_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_u8mf4x6_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x6_t test_vlseg6e8ff_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_u8mf2x6_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x6_t test_vlseg6e8ff_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg6e8ff_v_u8m1x6_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e16.c index 6a44076f3..35e04558e 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e16.c @@ -1,151 +1,187 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg7e16_v_f16mf4x7_tu(vd, rs1, vl); } -vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg7e16_v_f16mf2x7_tu(vd, rs1, vl); } -vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tu(vfloat16m1x7_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tu(vfloat16m1x7_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg7e16_v_f16m1x7_tu(vd, rs1, vl); } -vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg7e16_v_i16mf4x7_tu(vd, rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x7_t
test_vlseg7e16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg7e16_v_i16mf2x7_tu(vd, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, size_t vl) { +vint16m1x7_t test_vlseg7e16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg7e16_v_i16m1x7_tu(vd, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_u16mf4x7_tu(vd, rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_u16mf2x7_tu(vd, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vlseg7e16_v_u16m1x7_tu(vd, rs1, vl); } -vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg7e16_v_f16mf4x7_tum(vm, vd, rs1, vl); } -vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg7e16_v_f16mf2x7_tum(vm, vd, rs1, vl); } -vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg7e16_v_f16m1x7_tum(vm, vd, rs1, vl); } -vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_i16mf4x7_tum(vm, vd, rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_i16mf2x7_tum(vm, vd, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t vl) { +vint16m1x7_t test_vlseg7e16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_i16m1x7_tum(vm, vd, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_u16mf4x7_tum(vm, vd, rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_u16mf2x7_tum(vm, vd, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x7_t 
test_vlseg7e16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_u16m1x7_tum(vm, vd, rs1, vl); } -vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg7e16_v_f16mf4x7_tumu(vm, vd, rs1, vl); } -vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg7e16_v_f16mf2x7_tumu(vm, vd, rs1, vl); } -vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg7e16_v_f16m1x7_tumu(vm, vd, rs1, vl); } -vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_i16mf4x7_tumu(vm, vd, rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_i16mf2x7_tumu(vm, vd, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t vl) { +vint16m1x7_t test_vlseg7e16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_i16m1x7_tumu(vm, vd, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_u16mf4x7_tumu(vm, vd, rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_u16mf2x7_tumu(vm, vd, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x7_t test_vlseg7e16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_u16m1x7_tumu(vm, vd, rs1, vl); } -vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x7_t test_vlseg7e16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg7e16_v_f16mf4x7_mu(vm, vd, rs1, vl); } -vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x7_t test_vlseg7e16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg7e16_v_f16mf2x7_mu(vm, vd, rs1, vl); } -vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x7_t test_vlseg7e16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, size_t vl) { 
return __riscv_vlseg7e16_v_f16m1x7_mu(vm, vd, rs1, vl); } -vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x7_t test_vlseg7e16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_i16mf4x7_mu(vm, vd, rs1, vl); } -vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x7_t test_vlseg7e16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_i16mf2x7_mu(vm, vd, rs1, vl); } -vint16m1x7_t test_vlseg7e16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t vl) { +vint16m1x7_t test_vlseg7e16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_i16m1x7_mu(vm, vd, rs1, vl); } -vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x7_t test_vlseg7e16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_u16mf4x7_mu(vm, vd, rs1, vl); } -vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x7_t test_vlseg7e16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_u16mf2x7_mu(vm, vd, rs1, vl); } -vuint16m1x7_t test_vlseg7e16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x7_t test_vlseg7e16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg7e16_v_u16m1x7_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e16ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e16ff.c index 5a3fba33d..127b9c80b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e16ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e16ff.c @@ -1,151 +1,226 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tu(vfloat16mf4x7_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_f16mf4x7_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tu(vfloat16mf2x7_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_f16mf2x7_tu(vd, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tu(vfloat16m1x7_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tu(vfloat16m1x7_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_f16m1x7_tu(vd, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tu(vint16mf4x7_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return
__riscv_vlseg7e16ff_v_i16mf4x7_tu(vd, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tu(vint16mf2x7_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e16ff_v_i16mf2x7_tu(vd, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_i16m1x7_tu(vd, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tu(vuint16mf4x7_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_u16mf4x7_tu(vd, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tu(vuint16mf2x7_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_u16mf2x7_tu(vd, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tu(vuint16m1x7_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e16ff_v_u16m1x7_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tum(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_f16mf4x7_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tum(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_f16mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_f16m1x7_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_i16mf4x7_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_i16mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e16ff_v_i16m1x7_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x7_t 
test_vlseg7e16ff_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_u16mf4x7_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_u16mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_u16m1x7_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_tumu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_f16mf4x7_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_tumu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_f16mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_f16m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_i16mf4x7_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_i16mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e16ff_v_i16m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_u16mf4x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t 
vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_u16mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_u16m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x7_t test_vlseg7e16ff_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_f16mf4x7_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x7_t test_vlseg7e16ff_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_f16mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x7_t test_vlseg7e16ff_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_f16m1x7_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x7_t test_vlseg7e16ff_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e16ff_v_i16mf4x7_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x7_t test_vlseg7e16ff_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e16ff_v_i16mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x7_t test_vlseg7e16ff_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e16ff_v_i16m1x7_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x7_t test_vlseg7e16ff_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_u16mf4x7_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x7_t test_vlseg7e16ff_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e16ff_v_u16mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x7_t test_vlseg7e16ff_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e16ff_v_u16m1x7_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e32.c 
b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e32.c index c7fe4b3ff..062a9d026 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e32.c @@ -1,103 +1,127 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float *rs1, size_t vl) { +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg7e32_v_f32mf2x7_tu(vd, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, size_t vl) { +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, + size_t vl) { return __riscv_vlseg7e32_v_f32m1x7_tu(vd, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg7e32_v_i32mf2x7_tu(vd, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, size_t vl) { +vint32m1x7_t test_vlseg7e32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg7e32_v_i32m1x7_tu(vd, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg7e32_v_u32mf2x7_tu(vd, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, + size_t vl) { return __riscv_vlseg7e32_v_u32m1x7_tu(vd, rs1, vl); } -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, size_t vl) { +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg7e32_v_f32mf2x7_tum(vm, vd, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, size_t vl) { +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg7e32_v_f32m1x7_tum(vm, vd, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg7e32_v_i32mf2x7_tum(vm, vd, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t vl) { +vint32m1x7_t test_vlseg7e32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg7e32_v_i32m1x7_tum(vm, vd, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg7e32_v_u32mf2x7_tum(vm, vd, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const
uint32_t *rs1, size_t vl) { +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg7e32_v_u32m1x7_tum(vm, vd, rs1, vl); } -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, size_t vl) { +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg7e32_v_f32mf2x7_tumu(vm, vd, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, size_t vl) { +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg7e32_v_f32m1x7_tumu(vm, vd, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg7e32_v_i32mf2x7_tumu(vm, vd, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t vl) { +vint32m1x7_t test_vlseg7e32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg7e32_v_i32m1x7_tumu(vm, vd, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg7e32_v_u32mf2x7_tumu(vm, vd, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg7e32_v_u32m1x7_tumu(vm, vd, rs1, vl); } -vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, size_t vl) { +vfloat32mf2x7_t test_vlseg7e32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg7e32_v_f32mf2x7_mu(vm, vd, rs1, vl); } -vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, size_t vl) { +vfloat32m1x7_t test_vlseg7e32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg7e32_v_f32m1x7_mu(vm, vd, rs1, vl); } -vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x7_t test_vlseg7e32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg7e32_v_i32mf2x7_mu(vm, vd, rs1, vl); } -vint32m1x7_t test_vlseg7e32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t vl) { +vint32m1x7_t test_vlseg7e32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg7e32_v_i32m1x7_mu(vm, vd, rs1, vl); } -vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x7_t test_vlseg7e32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg7e32_v_u32mf2x7_mu(vm, vd, rs1, vl); } -vuint32m1x7_t test_vlseg7e32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x7_t test_vlseg7e32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, size_t vl) { return 
__riscv_vlseg7e32_v_u32m1x7_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e32ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e32ff.c index cf30aa4d9..1ac2d18aa 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e32ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e32ff.c @@ -1,103 +1,152 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tu(vfloat32mf2x7_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_f32mf2x7_tu(vd, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tu(vfloat32m1x7_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_f32m1x7_tu(vd, rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tu(vint32mf2x7_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_i32mf2x7_tu(vd, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e32ff_v_i32m1x7_tu(vd, rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tu(vuint32mf2x7_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e32ff_v_u32mf2x7_tu(vd, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tu(vuint32m1x7_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_u32m1x7_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tum(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e32ff_v_f32mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_f32m1x7_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e32ff_v_i32mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t
*rs1, size_t *new_vl, size_t vl) { +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_i32m1x7_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e32ff_v_u32mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e32ff_v_u32m1x7_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_tumu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e32ff_v_f32mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_f32m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e32ff_v_i32mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_i32m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e32ff_v_u32mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e32ff_v_u32m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x7_t test_vlseg7e32ff_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_f32mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x7_t test_vlseg7e32ff_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_f32m1x7_mu(vm, vd, rs1, new_vl, vl); } 
-vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x7_t test_vlseg7e32ff_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_i32mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x7_t test_vlseg7e32ff_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_i32m1x7_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x7_t test_vlseg7e32ff_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e32ff_v_u32mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x7_t test_vlseg7e32ff_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e32ff_v_u32m1x7_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e64.c index 40092b7c6..ea49db942 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e64.c @@ -1,55 +1,67 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tu(vfloat64m1x7_t vd, const double *rs1, size_t vl) { +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tu(vfloat64m1x7_t vd, const double *rs1, + size_t vl) { return __riscv_vlseg7e64_v_f64m1x7_tu(vd, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, size_t vl) { +vint64m1x7_t test_vlseg7e64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, + size_t vl) { return __riscv_vlseg7e64_v_i64m1x7_tu(vd, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, + size_t vl) { return __riscv_vlseg7e64_v_u64m1x7_tu(vd, rs1, vl); } -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, size_t vl) { +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg7e64_v_f64m1x7_tum(vm, vd, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t vl) { +vint64m1x7_t test_vlseg7e64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg7e64_v_i64m1x7_tum(vm, vd, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg7e64_v_u64m1x7_tum(vm, vd, rs1, vl); } -vfloat64m1x7_t
test_vlseg7e64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, size_t vl) { +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg7e64_v_f64m1x7_tumu(vm, vd, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t vl) { +vint64m1x7_t test_vlseg7e64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg7e64_v_i64m1x7_tumu(vm, vd, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg7e64_v_u64m1x7_tumu(vm, vd, rs1, vl); } -vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, size_t vl) { +vfloat64m1x7_t test_vlseg7e64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, size_t vl) { return __riscv_vlseg7e64_v_f64m1x7_mu(vm, vd, rs1, vl); } -vint64m1x7_t test_vlseg7e64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t vl) { +vint64m1x7_t test_vlseg7e64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, size_t vl) { return __riscv_vlseg7e64_v_i64m1x7_mu(vm, vd, rs1, vl); } -vuint64m1x7_t test_vlseg7e64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x7_t test_vlseg7e64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, size_t vl) { return __riscv_vlseg7e64_v_u64m1x7_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e64ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e64ff.c index b4332349a..c1722bd81 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e64ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e64ff.c @@ -1,55 +1,78 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tu(vfloat64m1x7_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tu(vfloat64m1x7_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e64ff_v_f64m1x7_tu(vd, rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e64ff_v_i64m1x7_tu(vd, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tu(vuint64m1x7_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e64ff_v_u64m1x7_tu(vd, rs1, new_vl, vl); } -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e64ff_v_f64m1x7_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x7_t 
test_vlseg7e64ff_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e64ff_v_i64m1x7_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e64ff_v_u64m1x7_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e64ff_v_f64m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e64ff_v_i64m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e64ff_v_u64m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x7_t test_vlseg7e64ff_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e64ff_v_f64m1x7_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x7_t test_vlseg7e64ff_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e64ff_v_i64m1x7_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x7_t test_vlseg7e64ff_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e64ff_v_u64m1x7_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e8.c index e4f81dd92..d302d5d6b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e8.c @@ -5,130 +5,162 @@ #include -vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg7e8_v_i8mf8x7_tu(vd, rs1, vl); } -vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg7e8_v_i8mf4x7_tu(vd, rs1, vl); } -vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, + size_t 
vl) { return __riscv_vlseg7e8_v_i8mf2x7_tu(vd, rs1, vl); } -vint8m1x7_t test_vlseg7e8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, size_t vl) { +vint8m1x7_t test_vlseg7e8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, + size_t vl) { return __riscv_vlseg7e8_v_i8m1x7_tu(vd, rs1, vl); } -vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg7e8_v_u8mf8x7_tu(vd, rs1, vl); } -vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg7e8_v_u8mf4x7_tu(vd, rs1, vl); } -vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg7e8_v_u8mf2x7_tu(vd, rs1, vl); } -vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, + size_t vl) { return __riscv_vlseg7e8_v_u8m1x7_tu(vd, rs1, vl); } -vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_i8mf8x7_tum(vm, vd, rs1, vl); } -vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_i8mf4x7_tum(vm, vd, rs1, vl); } -vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_i8mf2x7_tum(vm, vd, rs1, vl); } -vint8m1x7_t test_vlseg7e8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t vl) { +vint8m1x7_t test_vlseg7e8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_i8m1x7_tum(vm, vd, rs1, vl); } -vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_u8mf8x7_tum(vm, vd, rs1, vl); } -vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_u8mf4x7_tum(vm, vd, rs1, vl); } -vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_u8mf2x7_tum(vm, vd, rs1, vl); } -vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_u8m1x7_tum(vm, vd, rs1, vl); } -vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x7_t 
test_vlseg7e8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_i8mf8x7_tumu(vm, vd, rs1, vl); } -vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_i8mf4x7_tumu(vm, vd, rs1, vl); } -vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_i8mf2x7_tumu(vm, vd, rs1, vl); } -vint8m1x7_t test_vlseg7e8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t vl) { +vint8m1x7_t test_vlseg7e8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_i8m1x7_tumu(vm, vd, rs1, vl); } -vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_u8mf8x7_tumu(vm, vd, rs1, vl); } -vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_u8mf4x7_tumu(vm, vd, rs1, vl); } -vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_u8mf2x7_tumu(vm, vd, rs1, vl); } -vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x7_t test_vlseg7e8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_u8m1x7_tumu(vm, vd, rs1, vl); } -vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x7_t test_vlseg7e8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_i8mf8x7_mu(vm, vd, rs1, vl); } -vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x7_t test_vlseg7e8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_i8mf4x7_mu(vm, vd, rs1, vl); } -vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x7_t test_vlseg7e8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_i8mf2x7_mu(vm, vd, rs1, vl); } -vint8m1x7_t test_vlseg7e8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t vl) { +vint8m1x7_t test_vlseg7e8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_i8m1x7_mu(vm, vd, rs1, vl); } -vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x7_t test_vlseg7e8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_u8mf8x7_mu(vm, vd, rs1, vl); } -vuint8mf4x7_t test_vlseg7e8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x7_t 
test_vlseg7e8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_u8mf4x7_mu(vm, vd, rs1, vl); } -vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x7_t test_vlseg7e8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_u8mf2x7_mu(vm, vd, rs1, vl); } -vuint8m1x7_t test_vlseg7e8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x7_t test_vlseg7e8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg7e8_v_u8m1x7_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e8ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e8ff.c index 0087f5be3..d2fa804e5 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg7e8ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg7e8ff.c @@ -1,135 +1,191 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e8ff_v_i8mf8x7_tu(vd, rs1, new_vl, vl); } -vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e8ff_v_i8mf4x7_tu(vd, rs1, new_vl, vl); } -vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e8ff_v_i8mf2x7_tu(vd, rs1, new_vl, vl); } -vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e8ff_v_i8m1x7_tu(vd, rs1, new_vl, vl); } -vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e8ff_v_u8mf8x7_tu(vd, rs1, new_vl, vl); } -vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e8ff_v_u8mf4x7_tu(vd, rs1, new_vl, vl); } -vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg7e8ff_v_u8mf2x7_tu(vd, rs1, new_vl, vl); } -vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, + size_t *new_vl, size_t vl) { return 
__riscv_vlseg7e8ff_v_u8m1x7_tu(vd, rs1, new_vl, vl); } -vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_i8mf8x7_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_i8mf4x7_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_i8mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_i8m1x7_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_u8mf8x7_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_u8mf4x7_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_u8mf2x7_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_u8m1x7_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_i8mf8x7_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_i8mf4x7_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_i8mf2x7_tumu(vm, vd, rs1, 
new_vl, vl); } -vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_i8m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_u8mf8x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_u8mf4x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_u8mf2x7_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_u8m1x7_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x7_t test_vlseg7e8ff_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_i8mf8x7_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x7_t test_vlseg7e8ff_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_i8mf4x7_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x7_t test_vlseg7e8ff_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_i8mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x7_t test_vlseg7e8ff_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_i8m1x7_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x7_t test_vlseg7e8ff_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_u8mf8x7_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x7_t test_vlseg7e8ff_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_u8mf4x7_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x7_t 
test_vlseg7e8ff_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x7_t test_vlseg7e8ff_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_u8mf2x7_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x7_t test_vlseg7e8ff_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg7e8ff_v_u8m1x7_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e16.c index 02e8e5974..bc7eb6b96 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e16.c @@ -1,151 +1,187 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg8e16_v_f16mf4x8_tu(vd, rs1, vl); } -vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg8e16_v_f16mf2x8_tu(vd, rs1, vl); } -vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tu(vfloat16m1x8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tu(vfloat16m1x8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg8e16_v_f16m1x8_tu(vd, rs1, vl); } -vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg8e16_v_i16mf4x8_tu(vd, rs1, vl); } -vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg8e16_v_i16mf2x8_tu(vd, rs1, vl); } -vint16m1x8_t test_vlseg8e16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, size_t vl) { +vint16m1x8_t test_vlseg8e16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, + size_t vl) { return __riscv_vlseg8e16_v_i16m1x8_tu(vd, rs1, vl); } -vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_u16mf4x8_tu(vd, rs1, vl); } -vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_u16mf2x8_tu(vd, rs1, vl); } -vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, + size_t vl) { return __riscv_vlseg8e16_v_u16m1x8_tu(vd, rs1, vl); } -vfloat16mf4x8_t 
test_vlseg8e16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg8e16_v_f16mf4x8_tum(vm, vd, rs1, vl); } -vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg8e16_v_f16mf2x8_tum(vm, vd, rs1, vl); } -vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg8e16_v_f16m1x8_tum(vm, vd, rs1, vl); } -vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_i16mf4x8_tum(vm, vd, rs1, vl); } -vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_i16mf2x8_tum(vm, vd, rs1, vl); } -vint16m1x8_t test_vlseg8e16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t vl) { +vint16m1x8_t test_vlseg8e16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_i16m1x8_tum(vm, vd, rs1, vl); } -vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_u16mf4x8_tum(vm, vd, rs1, vl); } -vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_u16mf2x8_tum(vm, vd, rs1, vl); } -vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_u16m1x8_tum(vm, vd, rs1, vl); } -vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg8e16_v_f16mf4x8_tumu(vm, vd, rs1, vl); } -vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg8e16_v_f16mf2x8_tumu(vm, vd, rs1, vl); } -vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg8e16_v_f16m1x8_tumu(vm, vd, rs1, vl); } -vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x8_t 
test_vlseg8e16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_i16mf4x8_tumu(vm, vd, rs1, vl); } -vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_i16mf2x8_tumu(vm, vd, rs1, vl); } -vint16m1x8_t test_vlseg8e16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t vl) { +vint16m1x8_t test_vlseg8e16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_i16m1x8_tumu(vm, vd, rs1, vl); } -vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_u16mf4x8_tumu(vm, vd, rs1, vl); } -vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_u16mf2x8_tumu(vm, vd, rs1, vl); } -vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x8_t test_vlseg8e16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_u16m1x8_tumu(vm, vd, rs1, vl); } -vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf4x8_t test_vlseg8e16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg8e16_v_f16mf4x8_mu(vm, vd, rs1, vl); } -vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16mf2x8_t test_vlseg8e16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg8e16_v_f16mf2x8_mu(vm, vd, rs1, vl); } -vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, size_t vl) { +vfloat16m1x8_t test_vlseg8e16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, size_t vl) { return __riscv_vlseg8e16_v_f16m1x8_mu(vm, vd, rs1, vl); } -vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t vl) { +vint16mf4x8_t test_vlseg8e16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_i16mf4x8_mu(vm, vd, rs1, vl); } -vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t vl) { +vint16mf2x8_t test_vlseg8e16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_i16mf2x8_mu(vm, vd, rs1, vl); } -vint16m1x8_t test_vlseg8e16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t vl) { +vint16m1x8_t test_vlseg8e16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_i16m1x8_mu(vm, vd, rs1, vl); } -vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf4x8_t test_vlseg8e16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_u16mf4x8_mu(vm, vd, 
rs1, vl); } -vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t vl) { +vuint16mf2x8_t test_vlseg8e16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_u16mf2x8_mu(vm, vd, rs1, vl); } -vuint16m1x8_t test_vlseg8e16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t vl) { +vuint16m1x8_t test_vlseg8e16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, size_t vl) { return __riscv_vlseg8e16_v_u16m1x8_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e16ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e16ff.c index a0153b112..3357428a2 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e16ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e16ff.c @@ -1,151 +1,226 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tu(vfloat16mf4x8_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_f16mf4x8_tu(vd, rs1, new_vl, vl); } -vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tu(vfloat16mf2x8_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_f16mf2x8_tu(vd, rs1, new_vl, vl); } -vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tu(vfloat16m1x8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tu(vfloat16m1x8_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_f16m1x8_tu(vd, rs1, new_vl, vl); } -vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tu(vint16mf4x8_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e16ff_v_i16mf4x8_tu(vd, rs1, new_vl, vl); } -vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tu(vint16mf2x8_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e16ff_v_i16mf2x8_tu(vd, rs1, new_vl, vl); } -vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_i16m1x8_tu(vd, rs1, new_vl, vl); } -vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tu(vuint16mf4x8_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_u16mf4x8_tu(vd, rs1, new_vl, vl); } -vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tu(vuint16mf2x8_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { 
return __riscv_vlseg8e16ff_v_u16mf2x8_tu(vd, rs1, new_vl, vl); } -vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tu(vuint16m1x8_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e16ff_v_u16m1x8_tu(vd, rs1, new_vl, vl); } -vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tum(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_f16mf4x8_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tum(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_f16mf2x8_tum(vm, vd, rs1, new_vl, vl); } -vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_f16m1x8_tum(vm, vd, rs1, new_vl, vl); } -vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_i16mf4x8_tum(vm, vd, rs1, new_vl, vl); } -vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_i16mf2x8_tum(vm, vd, rs1, new_vl, vl); } -vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e16ff_v_i16m1x8_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_u16mf4x8_tum(vm, vd, rs1, new_vl, vl); } -vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_u16mf2x8_tum(vm, vd, rs1, new_vl, vl); } -vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_u16m1x8_tum(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_tumu(vbool64_t 
vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_f16mf4x8_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_tumu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_f16mf2x8_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_f16m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_i16mf4x8_tumu(vm, vd, rs1, new_vl, vl); } -vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_i16mf2x8_tumu(vm, vd, rs1, new_vl, vl); } -vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e16ff_v_i16m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_u16mf4x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_u16mf2x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_u16m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf4x8_t test_vlseg8e16ff_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_f16mf4x8_mu(vm, vd, rs1, new_vl, vl); } -vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16mf2x8_t test_vlseg8e16ff_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_f16mf2x8_mu(vm, vd, rs1, new_vl, vl); } -vfloat16m1x8_t 
test_vlseg8e16ff_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, size_t *new_vl, size_t vl) { +vfloat16m1x8_t test_vlseg8e16ff_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_f16m1x8_mu(vm, vd, rs1, new_vl, vl); } -vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf4x8_t test_vlseg8e16ff_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e16ff_v_i16mf4x8_mu(vm, vd, rs1, new_vl, vl); } -vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16mf2x8_t test_vlseg8e16ff_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e16ff_v_i16mf2x8_mu(vm, vd, rs1, new_vl, vl); } -vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, size_t *new_vl, size_t vl) { +vint16m1x8_t test_vlseg8e16ff_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e16ff_v_i16m1x8_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf4x8_t test_vlseg8e16ff_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_u16mf4x8_mu(vm, vd, rs1, new_vl, vl); } -vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16mf2x8_t test_vlseg8e16ff_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e16ff_v_u16mf2x8_mu(vm, vd, rs1, new_vl, vl); } -vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, size_t *new_vl, size_t vl) { +vuint16m1x8_t test_vlseg8e16ff_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e16ff_v_u16m1x8_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e32.c index 0752a67e7..b03b18dcb 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e32.c @@ -1,103 +1,127 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float *rs1, size_t vl) { +vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg8e32_v_f32mf2x8_tu(vd, rs1, vl); } -vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, size_t vl) { +vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, + size_t vl) { return __riscv_vlseg8e32_v_f32m1x8_tu(vd, rs1, vl); } -vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x8_t 
test_vlseg8e32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg8e32_v_i32mf2x8_tu(vd, rs1, vl); } -vint32m1x8_t test_vlseg8e32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, size_t vl) { +vint32m1x8_t test_vlseg8e32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, + size_t vl) { return __riscv_vlseg8e32_v_i32m1x8_tu(vd, rs1, vl); } -vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_u32mf2x8_tu(vd, rs1, vl); } -vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, + size_t vl) { return __riscv_vlseg8e32_v_u32m1x8_tu(vd, rs1, vl); } -vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, size_t vl) { +vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg8e32_v_f32mf2x8_tum(vm, vd, rs1, vl); } -vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, size_t vl) { +vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg8e32_v_f32m1x8_tum(vm, vd, rs1, vl); } -vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_i32mf2x8_tum(vm, vd, rs1, vl); } -vint32m1x8_t test_vlseg8e32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t vl) { +vint32m1x8_t test_vlseg8e32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_i32m1x8_tum(vm, vd, rs1, vl); } -vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_u32mf2x8_tum(vm, vd, rs1, vl); } -vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_u32m1x8_tum(vm, vd, rs1, vl); } -vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, size_t vl) { +vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg8e32_v_f32mf2x8_tumu(vm, vd, rs1, vl); } -vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, size_t vl) { +vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg8e32_v_f32m1x8_tumu(vm, vd, rs1, vl); } -vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_i32mf2x8_tumu(vm, vd, rs1, vl); } -vint32m1x8_t test_vlseg8e32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t vl) { +vint32m1x8_t 
test_vlseg8e32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_i32m1x8_tumu(vm, vd, rs1, vl); } -vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_u32mf2x8_tumu(vm, vd, rs1, vl); } -vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x8_t test_vlseg8e32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_u32m1x8_tumu(vm, vd, rs1, vl); } -vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, size_t vl) { +vfloat32mf2x8_t test_vlseg8e32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg8e32_v_f32mf2x8_mu(vm, vd, rs1, vl); } -vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, size_t vl) { +vfloat32m1x8_t test_vlseg8e32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, size_t vl) { return __riscv_vlseg8e32_v_f32m1x8_mu(vm, vd, rs1, vl); } -vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t vl) { +vint32mf2x8_t test_vlseg8e32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_i32mf2x8_mu(vm, vd, rs1, vl); } -vint32m1x8_t test_vlseg8e32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t vl) { +vint32m1x8_t test_vlseg8e32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_i32m1x8_mu(vm, vd, rs1, vl); } -vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t vl) { +vuint32mf2x8_t test_vlseg8e32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_u32mf2x8_mu(vm, vd, rs1, vl); } -vuint32m1x8_t test_vlseg8e32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t vl) { +vuint32m1x8_t test_vlseg8e32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, size_t vl) { return __riscv_vlseg8e32_v_u32m1x8_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e32ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e32ff.c index 1a96fb8f5..1e706c9b0 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e32ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e32ff.c @@ -1,103 +1,152 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tu(vfloat32mf2x8_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e32ff_v_f32mf2x8_tu(vd, rs1, new_vl, vl); } -vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tu(vfloat32m1x8_t vd, + const float *rs1, 
size_t *new_vl, + size_t vl) { return __riscv_vlseg8e32ff_v_f32m1x8_tu(vd, rs1, new_vl, vl); } -vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tu(vint32mf2x8_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e32ff_v_i32mf2x8_tu(vd, rs1, new_vl, vl); } -vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e32ff_v_i32m1x8_tu(vd, rs1, new_vl, vl); } -vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tu(vuint32mf2x8_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e32ff_v_u32mf2x8_tu(vd, rs1, new_vl, vl); } -vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tu(vuint32m1x8_t vd, + const uint32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e32ff_v_u32m1x8_tu(vd, rs1, new_vl, vl); } -vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tum(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e32ff_v_f32mf2x8_tum(vm, vd, rs1, new_vl, vl); } -vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e32ff_v_f32m1x8_tum(vm, vd, rs1, new_vl, vl); } -vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e32ff_v_i32mf2x8_tum(vm, vd, rs1, new_vl, vl); } -vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e32ff_v_i32m1x8_tum(vm, vd, rs1, new_vl, vl); } -vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e32ff_v_u32mf2x8_tum(vm, vd, rs1, new_vl, vl); } -vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e32ff_v_u32m1x8_tum(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_tumu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + size_t *new_vl, size_t vl) { return 
__riscv_vlseg8e32ff_v_f32mf2x8_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e32ff_v_f32m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e32ff_v_i32mf2x8_tumu(vm, vd, rs1, new_vl, vl); } -vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e32ff_v_i32m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e32ff_v_u32mf2x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e32ff_v_u32m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32mf2x8_t test_vlseg8e32ff_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e32ff_v_f32mf2x8_mu(vm, vd, rs1, new_vl, vl); } -vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, size_t *new_vl, size_t vl) { +vfloat32m1x8_t test_vlseg8e32ff_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e32ff_v_f32m1x8_mu(vm, vd, rs1, new_vl, vl); } -vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32mf2x8_t test_vlseg8e32ff_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e32ff_v_i32mf2x8_mu(vm, vd, rs1, new_vl, vl); } -vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, size_t *new_vl, size_t vl) { +vint32m1x8_t test_vlseg8e32ff_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e32ff_v_i32m1x8_mu(vm, vd, rs1, new_vl, vl); } -vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32mf2x8_t test_vlseg8e32ff_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + size_t *new_vl, size_t vl) { return __riscv_vlseg8e32ff_v_u32mf2x8_mu(vm, vd, rs1, new_vl, vl); } -vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, size_t *new_vl, size_t vl) { +vuint32m1x8_t test_vlseg8e32ff_v_u32m1x8_mu(vbool32_t vm, 
vuint32m1x8_t vd, +                                            const uint32_t *rs1, size_t *new_vl, +                                            size_t vl) { return __riscv_vlseg8e32ff_v_u32m1x8_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e64.c index 80212b2d2..555d44d65 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e64.c @@ -1,55 +1,67 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tu(vfloat64m1x8_t vd, const double *rs1, size_t vl) { +vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tu(vfloat64m1x8_t vd, const double *rs1, +                                           size_t vl) { return __riscv_vlseg8e64_v_f64m1x8_tu(vd, rs1, vl); } -vint64m1x8_t test_vlseg8e64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, size_t vl) { +vint64m1x8_t test_vlseg8e64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, +                                         size_t vl) { return __riscv_vlseg8e64_v_i64m1x8_tu(vd, rs1, vl); } -vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, +                                          size_t vl) { return __riscv_vlseg8e64_v_u64m1x8_tu(vd, rs1, vl); } -vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, size_t vl) { +vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, +                                            const double *rs1, size_t vl) { return __riscv_vlseg8e64_v_f64m1x8_tum(vm, vd, rs1, vl); } -vint64m1x8_t test_vlseg8e64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t vl) { +vint64m1x8_t test_vlseg8e64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, +                                          const int64_t *rs1, size_t vl) { return __riscv_vlseg8e64_v_i64m1x8_tum(vm, vd, rs1, vl); } -vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, +                                           const uint64_t *rs1, size_t vl) { return __riscv_vlseg8e64_v_u64m1x8_tum(vm, vd, rs1, vl); } -vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, size_t vl) { +vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, +                                             const double *rs1, size_t vl) { return __riscv_vlseg8e64_v_f64m1x8_tumu(vm, vd, rs1, vl); } -vint64m1x8_t test_vlseg8e64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t vl) { +vint64m1x8_t test_vlseg8e64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, +                                           const int64_t *rs1, size_t vl) { return __riscv_vlseg8e64_v_i64m1x8_tumu(vm, vd, rs1, vl); } -vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x8_t test_vlseg8e64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, +                                            const uint64_t *rs1, size_t vl) { return __riscv_vlseg8e64_v_u64m1x8_tumu(vm, vd, rs1, vl); } -vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, size_t vl) { +vfloat64m1x8_t test_vlseg8e64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, +                                           const double *rs1, size_t vl) { return __riscv_vlseg8e64_v_f64m1x8_mu(vm, vd, rs1, vl); } -vint64m1x8_t test_vlseg8e64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, 
const int64_t *rs1, size_t vl) { +vint64m1x8_t test_vlseg8e64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, +                                         const int64_t *rs1, size_t vl) { return __riscv_vlseg8e64_v_i64m1x8_mu(vm, vd, rs1, vl); } -vuint64m1x8_t test_vlseg8e64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t vl) { +vuint64m1x8_t test_vlseg8e64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, +                                          const uint64_t *rs1, size_t vl) { return __riscv_vlseg8e64_v_u64m1x8_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e64ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e64ff.c index 1b8da9adf..aaade51e3 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e64ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e64ff.c @@ -1,55 +1,78 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tu(vfloat64m1x8_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tu(vfloat64m1x8_t vd, +                                             const double *rs1, size_t *new_vl, +                                             size_t vl) { return __riscv_vlseg8e64ff_v_f64m1x8_tu(vd, rs1, new_vl, vl); } -vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, +                                           size_t *new_vl, size_t vl) { return __riscv_vlseg8e64ff_v_i64m1x8_tu(vd, rs1, new_vl, vl); } -vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tu(vuint64m1x8_t vd, +                                            const uint64_t *rs1, size_t *new_vl, +                                            size_t vl) { return __riscv_vlseg8e64ff_v_u64m1x8_tu(vd, rs1, new_vl, vl); } -vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, +                                              const double *rs1, size_t *new_vl, +                                              size_t vl) { return __riscv_vlseg8e64ff_v_f64m1x8_tum(vm, vd, rs1, new_vl, vl); } -vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, +                                            const int64_t *rs1, size_t *new_vl, +                                            size_t vl) { return __riscv_vlseg8e64ff_v_i64m1x8_tum(vm, vd, rs1, new_vl, vl); } -vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, +                                             const uint64_t *rs1, +                                             size_t *new_vl, size_t vl) { return __riscv_vlseg8e64ff_v_u64m1x8_tum(vm, vd, rs1, new_vl, vl); } -vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, +                                               const double *rs1, +                                               size_t *new_vl, size_t vl) { return __riscv_vlseg8e64ff_v_f64m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, +                                             const int64_t *rs1, size_t 
*new_vl, +                                             size_t vl) { return __riscv_vlseg8e64ff_v_i64m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, +                                              const uint64_t *rs1, +                                              size_t *new_vl, size_t vl) { return __riscv_vlseg8e64ff_v_u64m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, size_t *new_vl, size_t vl) { +vfloat64m1x8_t test_vlseg8e64ff_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, +                                             const double *rs1, size_t *new_vl, +                                             size_t vl) { return __riscv_vlseg8e64ff_v_f64m1x8_mu(vm, vd, rs1, new_vl, vl); } -vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, size_t *new_vl, size_t vl) { +vint64m1x8_t test_vlseg8e64ff_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, +                                           const int64_t *rs1, size_t *new_vl, +                                           size_t vl) { return __riscv_vlseg8e64ff_v_i64m1x8_mu(vm, vd, rs1, new_vl, vl); } -vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, size_t *new_vl, size_t vl) { +vuint64m1x8_t test_vlseg8e64ff_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, +                                            const uint64_t *rs1, size_t *new_vl, +                                            size_t vl) { return __riscv_vlseg8e64ff_v_u64m1x8_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e8.c index feb8eb9c3..2d83bb4df 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e8.c @@ -5,130 +5,162 @@ #include <riscv_vector.h> -vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, +                                        size_t vl) { return __riscv_vlseg8e8_v_i8mf8x8_tu(vd, rs1, vl); } -vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, +                                        size_t vl) { return __riscv_vlseg8e8_v_i8mf4x8_tu(vd, rs1, vl); } -vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, +                                        size_t vl) { return __riscv_vlseg8e8_v_i8mf2x8_tu(vd, rs1, vl); } -vint8m1x8_t test_vlseg8e8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, size_t vl) { +vint8m1x8_t test_vlseg8e8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, +                                      size_t vl) { return __riscv_vlseg8e8_v_i8m1x8_tu(vd, rs1, vl); } -vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, +                                         size_t vl) { return __riscv_vlseg8e8_v_u8mf8x8_tu(vd, rs1, vl); } -vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, +                                         size_t vl) { return __riscv_vlseg8e8_v_u8mf4x8_tu(vd, rs1, vl); } -vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, +                                         size_t vl) { return __riscv_vlseg8e8_v_u8mf2x8_tu(vd, rs1, vl); } -vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, +                                       size_t vl) { return 
__riscv_vlseg8e8_v_u8m1x8_tu(vd, rs1, vl); } -vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_i8mf8x8_tum(vm, vd, rs1, vl); } -vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_i8mf4x8_tum(vm, vd, rs1, vl); } -vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_i8mf2x8_tum(vm, vd, rs1, vl); } -vint8m1x8_t test_vlseg8e8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t vl) { +vint8m1x8_t test_vlseg8e8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_i8m1x8_tum(vm, vd, rs1, vl); } -vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_u8mf8x8_tum(vm, vd, rs1, vl); } -vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_u8mf4x8_tum(vm, vd, rs1, vl); } -vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_u8mf2x8_tum(vm, vd, rs1, vl); } -vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_u8m1x8_tum(vm, vd, rs1, vl); } -vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_i8mf8x8_tumu(vm, vd, rs1, vl); } -vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_i8mf4x8_tumu(vm, vd, rs1, vl); } -vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_i8mf2x8_tumu(vm, vd, rs1, vl); } -vint8m1x8_t test_vlseg8e8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t vl) { +vint8m1x8_t test_vlseg8e8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_i8m1x8_tumu(vm, vd, rs1, vl); } -vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, size_t vl) { return 
__riscv_vlseg8e8_v_u8mf8x8_tumu(vm, vd, rs1, vl); } -vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_u8mf4x8_tumu(vm, vd, rs1, vl); } -vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_u8mf2x8_tumu(vm, vd, rs1, vl); } -vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x8_t test_vlseg8e8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_u8m1x8_tumu(vm, vd, rs1, vl); } -vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t vl) { +vint8mf8x8_t test_vlseg8e8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_i8mf8x8_mu(vm, vd, rs1, vl); } -vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t vl) { +vint8mf4x8_t test_vlseg8e8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_i8mf4x8_mu(vm, vd, rs1, vl); } -vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t vl) { +vint8mf2x8_t test_vlseg8e8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_i8mf2x8_mu(vm, vd, rs1, vl); } -vint8m1x8_t test_vlseg8e8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t vl) { +vint8m1x8_t test_vlseg8e8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_i8m1x8_mu(vm, vd, rs1, vl); } -vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf8x8_t test_vlseg8e8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_u8mf8x8_mu(vm, vd, rs1, vl); } -vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf4x8_t test_vlseg8e8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_u8mf4x8_mu(vm, vd, rs1, vl); } -vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8mf2x8_t test_vlseg8e8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_u8mf2x8_mu(vm, vd, rs1, vl); } -vuint8m1x8_t test_vlseg8e8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t vl) { +vuint8m1x8_t test_vlseg8e8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, size_t vl) { return __riscv_vlseg8e8_v_u8m1x8_mu(vm, vd, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e8ff.c b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e8ff.c index 9beaac596..f7db3628c 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlseg8e8ff.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlseg8e8ff.c @@ -1,135 +1,191 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh 
-disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, +                                          size_t *new_vl, size_t vl) { return __riscv_vlseg8e8ff_v_i8mf8x8_tu(vd, rs1, new_vl, vl); } -vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, +                                          size_t *new_vl, size_t vl) { return __riscv_vlseg8e8ff_v_i8mf4x8_tu(vd, rs1, new_vl, vl); } -vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, +                                          size_t *new_vl, size_t vl) { return __riscv_vlseg8e8ff_v_i8mf2x8_tu(vd, rs1, new_vl, vl); } -vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, +                                        size_t *new_vl, size_t vl) { return __riscv_vlseg8e8ff_v_i8m1x8_tu(vd, rs1, new_vl, vl); } -vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, +                                           size_t *new_vl, size_t vl) { return __riscv_vlseg8e8ff_v_u8mf8x8_tu(vd, rs1, new_vl, vl); } -vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, +                                           size_t *new_vl, size_t vl) { return __riscv_vlseg8e8ff_v_u8mf4x8_tu(vd, rs1, new_vl, vl); } -vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, +                                           size_t *new_vl, size_t vl) { return __riscv_vlseg8e8ff_v_u8mf2x8_tu(vd, rs1, new_vl, vl); } -vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, +                                         size_t *new_vl, size_t vl) { return __riscv_vlseg8e8ff_v_u8m1x8_tu(vd, rs1, new_vl, vl); } -vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, +                                           const int8_t *rs1, size_t *new_vl, +                                           size_t vl) { return __riscv_vlseg8e8ff_v_i8mf8x8_tum(vm, vd, rs1, new_vl, vl); } -vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, +                                           const int8_t *rs1, size_t *new_vl, +                                           size_t vl) { return __riscv_vlseg8e8ff_v_i8mf4x8_tum(vm, vd, rs1, new_vl, vl); } -vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, +                                           const int8_t *rs1, size_t *new_vl, +                                           size_t vl) { return __riscv_vlseg8e8ff_v_i8mf2x8_tum(vm, vd, rs1, new_vl, vl); } -vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, +                                         const int8_t 
*rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_i8m1x8_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_u8mf8x8_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_u8mf4x8_tum(vm, vd, rs1, new_vl, vl); } -vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_u8mf2x8_tum(vm, vd, rs1, new_vl, vl); } -vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_u8m1x8_tum(vm, vd, rs1, new_vl, vl); } -vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_i8mf8x8_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_i8mf4x8_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_i8mf2x8_tumu(vm, vd, rs1, new_vl, vl); } -vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_i8m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_u8mf8x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_u8mf4x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, size_t 
*new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_u8mf2x8_tumu(vm, vd, rs1, new_vl, vl); } -vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_u8m1x8_tumu(vm, vd, rs1, new_vl, vl); } -vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf8x8_t test_vlseg8e8ff_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_i8mf8x8_mu(vm, vd, rs1, new_vl, vl); } -vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf4x8_t test_vlseg8e8ff_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_i8mf4x8_mu(vm, vd, rs1, new_vl, vl); } -vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8mf2x8_t test_vlseg8e8ff_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_i8mf2x8_mu(vm, vd, rs1, new_vl, vl); } -vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, size_t *new_vl, size_t vl) { +vint8m1x8_t test_vlseg8e8ff_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_i8m1x8_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf8x8_t test_vlseg8e8ff_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_u8mf8x8_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf4x8_t test_vlseg8e8ff_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_u8mf4x8_mu(vm, vd, rs1, new_vl, vl); } -vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8mf2x8_t test_vlseg8e8ff_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_u8mf2x8_mu(vm, vd, rs1, new_vl, vl); } -vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, size_t *new_vl, size_t vl) { +vuint8m1x8_t test_vlseg8e8ff_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, size_t *new_vl, + size_t vl) { return __riscv_vlseg8e8ff_v_u8m1x8_mu(vm, vd, rs1, new_vl, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e16.c index a4ee9618a..69a53246b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e16.c @@ -1,247 +1,366 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck 
--check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, +                                              const _Float16 *rs1, +                                              ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_f16mf4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, +                                              const _Float16 *rs1, +                                              ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_f16mf2x2_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tu(vfloat16m1x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tu(vfloat16m1x2_t vd, +                                            const _Float16 *rs1, ptrdiff_t rs2, +                                            size_t vl) { return __riscv_vlsseg2e16_v_f16m1x2_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tu(vfloat16m2x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tu(vfloat16m2x2_t vd, +                                            const _Float16 *rs1, ptrdiff_t rs2, +                                            size_t vl) { return __riscv_vlsseg2e16_v_f16m2x2_tu(vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tu(vfloat16m4x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tu(vfloat16m4x2_t vd, +                                            const _Float16 *rs1, ptrdiff_t rs2, +                                            size_t vl) { return __riscv_vlsseg2e16_v_f16m4x2_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tu(vint16mf4x2_t vd, +                                            const int16_t *rs1, ptrdiff_t rs2, +                                            size_t vl) { return __riscv_vlsseg2e16_v_i16mf4x2_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tu(vint16mf2x2_t vd, +                                            const int16_t *rs1, ptrdiff_t rs2, +                                            size_t vl) { return __riscv_vlsseg2e16_v_i16mf2x2_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, +                                          ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_i16m1x2_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, +                                          ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_i16m2x2_tu(vd, rs1, rs2, vl); } -vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, +                                          ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_i16m4x2_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tu(vuint16mf4x2_t vd, +                                             const uint16_t *rs1, ptrdiff_t rs2, +                                             size_t vl) { return __riscv_vlsseg2e16_v_u16mf4x2_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tu(vuint16mf2x2_t vd, +                                             const uint16_t *rs1, ptrdiff_t rs2, +                                             size_t vl) { return __riscv_vlsseg2e16_v_u16mf2x2_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tu(vuint16m1x2_t 
vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tu(vuint16m1x2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_u16m1x2_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tu(vuint16m2x2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_u16m2x2_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tu(vuint16m4x2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_u16m4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_f16m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_f16m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_f16m4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2x2_t 
test_vlsseg2e16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16m2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_u16m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_tumu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_tumu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t 
vl) { +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, 
size_t vl) { +vfloat16mf4x2_t test_vlsseg2e16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x2_t test_vlsseg2e16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e16_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x2_t test_vlsseg2e16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2x2_t test_vlsseg2e16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m4x2_t test_vlsseg2e16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_f16m4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x2_t test_vlsseg2e16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x2_t test_vlsseg2e16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vlsseg2e16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x2_t test_vlsseg2e16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vlsseg2e16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2x2_t test_vlsseg2e16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vlsseg2e16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m4x2_t test_vlsseg2e16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_i16m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x2_t test_vlsseg2e16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e16_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vlsseg2e16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x2_t 
test_vlsseg2e16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, +                                             const uint16_t *rs1, ptrdiff_t rs2, +                                             size_t vl) { return __riscv_vlsseg2e16_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x2_t test_vlsseg2e16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, +                                           const uint16_t *rs1, ptrdiff_t rs2, +                                           size_t vl) { return __riscv_vlsseg2e16_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2x2_t test_vlsseg2e16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, +                                           const uint16_t *rs1, ptrdiff_t rs2, +                                           size_t vl) { return __riscv_vlsseg2e16_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m4x2_t test_vlsseg2e16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, +                                           const uint16_t *rs1, ptrdiff_t rs2, +                                           size_t vl) { return __riscv_vlsseg2e16_v_u16m4x2_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e32.c index 28b15ebfb..2708bce24 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e32.c @@ -1,199 +1,290 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, +                                              const float *rs1, ptrdiff_t rs2, +                                              size_t vl) { return __riscv_vlsseg2e32_v_f32mf2x2_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, +                                            ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e32_v_f32m1x2_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float *rs1, +                                            ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e32_v_f32m2x2_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, +                                            ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e32_v_f32m4x2_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tu(vint32mf2x2_t vd, +                                            const int32_t *rs1, ptrdiff_t rs2, +                                            size_t vl) { return __riscv_vlsseg2e32_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, +                                          ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e32_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t vd, const 
int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e32_v_i32m2x2_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e32_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tu(vuint32mf2x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tu(vuint32m1x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32m1x2_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tu(vuint32m2x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tu(vuint32m4x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_i32m1x2_tum(vm, vd, rs1, rs2, 
vl); } -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e32_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_tumu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } 
-vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e32_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x2_t test_vlsseg2e32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x2_t test_vlsseg2e32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2x2_t test_vlsseg2e32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m4x2_t test_vlsseg2e32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t 
test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x2_t test_vlsseg2e32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x2_t test_vlsseg2e32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2x2_t test_vlsseg2e32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m4x2_t test_vlsseg2e32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x2_t test_vlsseg2e32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x2_t test_vlsseg2e32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2x2_t test_vlsseg2e32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m4x2_t test_vlsseg2e32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e32_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e64.c index f79ae8169..606957c48 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e64.c @@ -1,151 +1,220 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tu(vfloat64m1x2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tu(vfloat64m1x2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_f64m1x2_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tu(vfloat64m2x2_t vd, const double *rs1, ptrdiff_t rs2, 
size_t vl) { +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tu(vfloat64m2x2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_f64m2x2_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tu(vfloat64m4x2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tu(vfloat64m4x2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_f64m4x2_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e64_v_i64m1x2_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e64_v_i64m2x2_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e64_v_i64m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tu(vuint64m1x2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tu(vuint64m2x2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tu(vuint64m4x2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, ptrdiff_t rs2, 
size_t vl) { +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x2_t 
test_vlsseg2e64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x2_t test_vlsseg2e64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m2x2_t test_vlsseg2e64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m4x2_t test_vlsseg2e64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vlsseg2e64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x2_t test_vlsseg2e64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vlsseg2e64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m2x2_t test_vlsseg2e64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vlsseg2e64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m4x2_t test_vlsseg2e64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x2_t test_vlsseg2e64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m2x2_t test_vlsseg2e64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t 
vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m4x2_t test_vlsseg2e64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e64_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e8.c index fd727d94b..d9a0b00d7 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg2e8.c @@ -5,194 +5,278 @@ #include <riscv_vector.h> -vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e8_v_i8mf8x2_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e8_v_i8mf4x2_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e8_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e8_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e8_v_i8m2x2_tu(vd, rs1, rs2, vl); } -vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e8_v_i8m4x2_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e8_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e8_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e8_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e8_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e8_v_u8m2x2_tu(vd, rs1, rs2, vl); } -vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m4x2_t 
test_vlsseg2e8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg2e8_v_u8m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8m2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8m2x2_tum(vm, vd, rs1, rs2, vl); } 
-vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8m4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { 
+vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x2_t test_vlsseg2e8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x2_t test_vlsseg2e8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x2_t test_vlsseg2e8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vlsseg2e8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x2_t test_vlsseg2e8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vlsseg2e8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2x2_t test_vlsseg2e8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8m2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vlsseg2e8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m4x2_t test_vlsseg2e8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_i8m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x2_t test_vlsseg2e8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x2_t test_vlsseg2e8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x2_t test_vlsseg2e8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x2_t test_vlsseg2e8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); 
} -vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2x2_t test_vlsseg2e8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m4x2_t test_vlsseg2e8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg2e8_v_u8m4x2_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e16.c index cc8a0ac9d..0f86e848f 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e16.c @@ -1,199 +1,295 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_f16mf4x3_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_f16mf2x3_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tu(vfloat16m1x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tu(vfloat16m1x3_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_f16m1x3_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tu(vfloat16m2x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tu(vfloat16m2x3_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tu(vint16mf4x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16mf4x3_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tu(vint16mf2x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_i16m1x3_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_i16m2x3_tu(vd, rs1, 
rs2, vl); } -vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tu(vuint16mf4x3_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_u16mf4x3_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tu(vuint16mf2x3_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_u16mf2x3_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tu(vuint16m1x3_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_u16m1x3_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tu(vuint16m2x3_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_u16m2x3_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_f16m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_f16m2x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, 
size_t vl) { +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_tumu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_tumu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, 
const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x3_t test_vlsseg3e16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x3_t test_vlsseg3e16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e16_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x3_t test_vlsseg3e16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_f16m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2x3_t test_vlsseg3e16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_f16m2x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x3_t test_vlsseg3e16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_mu(vbool32_t vm, 
vint16mf2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x3_t test_vlsseg3e16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vlsseg3e16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x3_t test_vlsseg3e16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vlsseg3e16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2x3_t test_vlsseg3e16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_i16m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x3_t test_vlsseg3e16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x3_t test_vlsseg3e16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x3_t test_vlsseg3e16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_u16m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2x3_t test_vlsseg3e16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e16_v_u16m2x3_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e32.c index 89e170b8e..8511dc04d 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e32.c @@ -1,151 +1,220 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e32_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2x3_t 
test_vlsseg3e32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e32_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tu(vint32mf2x3_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e32_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e32_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tu(vuint32mf2x3_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_u32mf2x3_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tu(vuint32m1x3_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_u32m1x3_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tu(vuint32m2x3_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_u32m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_f32m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_f32m2x3_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_i32m1x3_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t 
*rs1, ptrdiff_t rs2, size_t vl) { +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_i32m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e32_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_u32m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_u32m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_tumu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e32_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const 
uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x3_t test_vlsseg3e32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x3_t test_vlsseg3e32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_f32m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2x3_t test_vlsseg3e32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_f32m2x3_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x3_t test_vlsseg3e32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vlsseg3e32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x3_t test_vlsseg3e32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_i32m1x3_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vlsseg3e32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2x3_t test_vlsseg3e32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_i32m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x3_t test_vlsseg3e32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x3_t test_vlsseg3e32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_u32m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2x3_t test_vlsseg3e32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e32_v_u32m2x3_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e64.c 
index c79dd026f..133fbd281 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e64.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e64.c
@@ -1,103 +1,149 @@
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

-vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tu(vfloat64m1x3_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) {
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tu(vfloat64m1x3_t vd,
+ const double *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e64_v_f64m1x3_tu(vd, rs1, rs2, vl);
}

-vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tu(vfloat64m2x3_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) {
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tu(vfloat64m2x3_t vd,
+ const double *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e64_v_f64m2x3_tu(vd, rs1, rs2, vl);
}

-vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg3e64_v_i64m1x3_tu(vd, rs1, rs2, vl);
}

-vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg3e64_v_i64m2x3_tu(vd, rs1, rs2, vl);
}

-vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tu(vuint64m1x3_t vd,
+ const uint64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e64_v_u64m1x3_tu(vd, rs1, rs2, vl);
}

-vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tu(vuint64m2x3_t vd,
+ const uint64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e64_v_u64m2x3_tu(vd, rs1, rs2, vl);
}

-vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) {
+vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd,
+ const double *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e64_v_f64m1x3_tum(vm, vd, rs1, rs2, vl);
}

-vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) {
+vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd,
+ const double *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e64_v_f64m2x3_tum(vm, vd, rs1, rs2, vl);
}

-vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd,
+ const int64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e64_v_i64m1x3_tum(vm, vd, rs1, rs2, vl);
}

-vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd,
+ const int64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e64_v_i64m2x3_tum(vm, vd, rs1, rs2, vl);
}

-vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tum(vbool64_t vm,
vuint64m1x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e64_v_u64m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e64_v_u64m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e64_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e64_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e64_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m2x3_t test_vlsseg3e64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e64_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e64_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e64_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x3_t test_vlsseg3e64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e64_v_f64m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m2x3_t test_vlsseg3e64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e64_v_f64m2x3_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vlsseg3e64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x3_t test_vlsseg3e64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e64_v_i64m1x3_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vlsseg3e64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t 
*rs1, ptrdiff_t rs2, size_t vl) {
+vint64m2x3_t test_vlsseg3e64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd,
+ const int64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e64_v_i64m2x3_mu(vm, vd, rs1, rs2, vl);
}

-vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint64m1x3_t test_vlsseg3e64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd,
+ const uint64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e64_v_u64m1x3_mu(vm, vd, rs1, rs2, vl);
}

-vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint64m2x3_t test_vlsseg3e64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd,
+ const uint64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e64_v_u64m2x3_mu(vm, vd, rs1, rs2, vl);
}
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e8.c
index 21ce67d95..f3e67151a 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e8.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg3e8.c
@@ -5,162 +5,232 @@
#include <riscv_vector.h>

-vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg3e8_v_i8mf8x3_tu(vd, rs1, rs2, vl);
}

-vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg3e8_v_i8mf4x3_tu(vd, rs1, rs2, vl);
}

-vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg3e8_v_i8mf2x3_tu(vd, rs1, rs2, vl);
}

-vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg3e8_v_i8m1x3_tu(vd, rs1, rs2, vl);
}

-vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg3e8_v_i8m2x3_tu(vd, rs1, rs2, vl);
}

-vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg3e8_v_u8mf8x3_tu(vd, rs1, rs2, vl);
}

-vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg3e8_v_u8mf4x3_tu(vd, rs1, rs2, vl);
}

-vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg3e8_v_u8mf2x3_tu(vd, rs1, rs2, vl);
}

-vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1,
+ ptrdiff_t
rs2, size_t vl) { return __riscv_vlsseg3e8_v_u8m1x3_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg3e8_v_u8m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_i8m1x3_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_i8m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_u8m2x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, ptrdiff_t rs2, 
size_t vl) { +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x3_t test_vlsseg3e8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2x3_t test_vlsseg3e8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x3_t test_vlsseg3e8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg3e8_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x3_t test_vlsseg3e8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, ptrdiff_t rs2, + 
size_t vl) {
  return __riscv_vlsseg3e8_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl);
}

-vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint8mf2x3_t test_vlsseg3e8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd,
+ const int8_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e8_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl);
}

-vint8m1x3_t test_vlsseg3e8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint8m1x3_t test_vlsseg3e8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd,
+ const int8_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e8_v_i8m1x3_mu(vm, vd, rs1, rs2, vl);
}

-vint8m2x3_t test_vlsseg3e8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint8m2x3_t test_vlsseg3e8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd,
+ const int8_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e8_v_i8m2x3_mu(vm, vd, rs1, rs2, vl);
}

-vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint8mf8x3_t test_vlsseg3e8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd,
+ const uint8_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e8_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl);
}

-vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint8mf4x3_t test_vlsseg3e8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd,
+ const uint8_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e8_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl);
}

-vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint8mf2x3_t test_vlsseg3e8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd,
+ const uint8_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e8_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl);
}

-vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint8m1x3_t test_vlsseg3e8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd,
+ const uint8_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e8_v_u8m1x3_mu(vm, vd, rs1, rs2, vl);
}

-vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint8m2x3_t test_vlsseg3e8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd,
+ const uint8_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg3e8_v_u8m2x3_mu(vm, vd, rs1, rs2, vl);
}
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e16.c
index 25e195117..ee44044c0 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e16.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e16.c
@@ -1,199 +1,295 @@
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

-vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) {
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tu(vfloat16mf4x4_t vd,
+ const _Float16 *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg4e16_v_f16mf4x4_tu(vd, rs1, rs2, vl);
}

-vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const _Float16
*rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_f16mf2x4_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tu(vfloat16m1x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tu(vfloat16m1x4_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_f16m1x4_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tu(vfloat16m2x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tu(vfloat16m2x4_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_f16m2x4_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tu(vint16mf4x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16mf4x4_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tu(vint16mf2x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16mf2x4_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_i16m1x4_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_i16m2x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tu(vuint16mf4x4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tu(vuint16mf2x4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tu(vuint16m1x4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tu(vuint16m2x4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { 
+vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { 
+vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_tumu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_tumu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const 
uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x4_t test_vlsseg4e16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x4_t test_vlsseg4e16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e16_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x4_t test_vlsseg4e16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m2x4_t test_vlsseg4e16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x4_t test_vlsseg4e16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x4_t test_vlsseg4e16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vlsseg4e16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x4_t test_vlsseg4e16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vlsseg4e16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m2x4_t test_vlsseg4e16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x4_t test_vlsseg4e16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x4_t test_vlsseg4e16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e16_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, 
ptrdiff_t rs2, size_t vl) {
+vuint16m1x4_t test_vlsseg4e16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd,
+ const uint16_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e16_v_u16m1x4_mu(vm, vd, rs1, rs2, vl);
}

-vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint16m2x4_t test_vlsseg4e16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd,
+ const uint16_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e16_v_u16m2x4_mu(vm, vd, rs1, rs2, vl);
}
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e32.c
index f2fdfcd1e..c9a6a6c66 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e32.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e32.c
@@ -1,151 +1,220 @@
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

-vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) {
+vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tu(vfloat32mf2x4_t vd,
+ const float *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e32_v_f32mf2x4_tu(vd, rs1, rs2, vl);
}

-vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) {
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg4e32_v_f32m1x4_tu(vd, rs1, rs2, vl);
}

-vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) {
+vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg4e32_v_f32m2x4_tu(vd, rs1, rs2, vl);
}

-vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tu(vint32mf2x4_t vd,
+ const int32_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e32_v_i32mf2x4_tu(vd, rs1, rs2, vl);
}

-vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg4e32_v_i32m1x4_tu(vd, rs1, rs2, vl);
}

-vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg4e32_v_i32m2x4_tu(vd, rs1, rs2, vl);
}

-vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tu(vuint32mf2x4_t vd,
+ const uint32_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e32_v_u32mf2x4_tu(vd, rs1, rs2, vl);
}

-vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tu(vuint32m1x4_t vd,
+ const uint32_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e32_v_u32m1x4_tu(vd, rs1, rs2, vl);
}

-vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1,
ptrdiff_t rs2, size_t vl) { +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tu(vuint32m2x4_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_u32m2x4_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_f32m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_f32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e32_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_tumu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { 
+vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x4_t test_vlsseg4e32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m2x4_t test_vlsseg4e32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e32_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x4_t test_vlsseg4e32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x4_t test_vlsseg4e32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m2x4_t test_vlsseg4e32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e32_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vlsseg4e32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x4_t 
test_vlsseg4e32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd,
+ const int32_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e32_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl);
}

-vint32m1x4_t test_vlsseg4e32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint32m1x4_t test_vlsseg4e32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd,
+ const int32_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e32_v_i32m1x4_mu(vm, vd, rs1, rs2, vl);
}

-vint32m2x4_t test_vlsseg4e32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint32m2x4_t test_vlsseg4e32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd,
+ const int32_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e32_v_i32m2x4_mu(vm, vd, rs1, rs2, vl);
}

-vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint32mf2x4_t test_vlsseg4e32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd,
+ const uint32_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e32_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl);
}

-vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint32m1x4_t test_vlsseg4e32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd,
+ const uint32_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e32_v_u32m1x4_mu(vm, vd, rs1, rs2, vl);
}

-vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint32m2x4_t test_vlsseg4e32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd,
+ const uint32_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e32_v_u32m2x4_mu(vm, vd, rs1, rs2, vl);
}
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e64.c
index 2b846b6ce..934bc5cf9 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e64.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e64.c
@@ -1,103 +1,149 @@
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

-vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tu(vfloat64m1x4_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) {
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tu(vfloat64m1x4_t vd,
+ const double *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e64_v_f64m1x4_tu(vd, rs1, rs2, vl);
}

-vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tu(vfloat64m2x4_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) {
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tu(vfloat64m2x4_t vd,
+ const double *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e64_v_f64m2x4_tu(vd, rs1, rs2, vl);
}

-vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg4e64_v_i64m1x4_tu(vd, rs1, rs2, vl);
}

-vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg4e64_v_i64m2x4_tu(vd, rs1, rs2, vl);
}
-vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tu(vuint64m1x4_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e64_v_u64m1x4_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tu(vuint64m2x4_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e64_v_u64m2x4_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e64_v_f64m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e64_v_f64m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e64_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e64_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e64_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e64_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e64_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e64_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x4_t test_vlsseg4e64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e64_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, 
ptrdiff_t rs2, size_t vl) {
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd,
+ const int64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e64_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl);
}

-vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd,
+ const uint64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e64_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl);
}

-vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd,
+ const uint64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e64_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl);
}

-vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) {
+vfloat64m1x4_t test_vlsseg4e64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd,
+ const double *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e64_v_f64m1x4_mu(vm, vd, rs1, rs2, vl);
}

-vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) {
+vfloat64m2x4_t test_vlsseg4e64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd,
+ const double *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e64_v_f64m2x4_mu(vm, vd, rs1, rs2, vl);
}

-vint64m1x4_t test_vlsseg4e64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint64m1x4_t test_vlsseg4e64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd,
+ const int64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e64_v_i64m1x4_mu(vm, vd, rs1, rs2, vl);
}

-vint64m2x4_t test_vlsseg4e64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint64m2x4_t test_vlsseg4e64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd,
+ const int64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e64_v_i64m2x4_mu(vm, vd, rs1, rs2, vl);
}

-vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint64m1x4_t test_vlsseg4e64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd,
+ const uint64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e64_v_u64m1x4_mu(vm, vd, rs1, rs2, vl);
}

-vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) {
+vuint64m2x4_t test_vlsseg4e64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd,
+ const uint64_t *rs1, ptrdiff_t rs2,
+ size_t vl) {
  return __riscv_vlsseg4e64_v_u64m2x4_mu(vm, vd, rs1, rs2, vl);
}
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e8.c
index 13bc0efe7..a450d8181 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e8.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg4e8.c
@@ -5,162 +5,232 @@
#include <riscv_vector.h>

-vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1,
+ ptrdiff_t rs2, size_t vl) {
  return __riscv_vlsseg4e8_v_i8mf8x4_tu(vd, rs1, rs2, vl);
}

-vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) {
+vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1,
+ ptrdiff_t rs2,
size_t vl) { return __riscv_vlsseg4e8_v_i8mf4x4_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e8_v_i8mf2x4_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e8_v_i8m1x4_tu(vd, rs1, rs2, vl); } -vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e8_v_i8m2x4_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e8_v_u8mf8x4_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e8_v_u8mf4x4_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e8_v_u8mf2x4_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e8_v_u8m1x4_tu(vd, rs1, rs2, vl); } -vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg4e8_v_u8m2x4_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8m1x4_tum(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t 
vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, + const 
uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x4_t test_vlsseg4e8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x4_t test_vlsseg4e8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x4_t test_vlsseg4e8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vlsseg4e8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x4_t test_vlsseg4e8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vlsseg4e8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m2x4_t test_vlsseg4e8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_i8m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x4_t test_vlsseg4e8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x4_t test_vlsseg4e8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t 
test_vlsseg4e8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x4_t test_vlsseg4e8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x4_t test_vlsseg4e8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m2x4_t test_vlsseg4e8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg4e8_v_u8m2x4_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e16.c index 99bedcb3a..aa1ee1912 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e16.c @@ -1,151 +1,224 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_f16mf4x5_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_f16mf2x5_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tu(vfloat16m1x5_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tu(vfloat16m1x5_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_f16m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tu(vint16mf4x5_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tu(vint16mf2x5_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tu(vuint16mf4x5_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return
__riscv_vlsseg5e16_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tu(vuint16mf2x5_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tu(vuint16m1x5_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_u16m1x5_tum(vm, vd, rs1, rs2, 
vl); } -vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_tumu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_tumu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x5_t test_vlsseg5e16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e16_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x5_t test_vlsseg5e16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) 
{ return __riscv_vlsseg5e16_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x5_t test_vlsseg5e16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x5_t test_vlsseg5e16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x5_t test_vlsseg5e16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vlsseg5e16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x5_t test_vlsseg5e16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x5_t test_vlsseg5e16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x5_t test_vlsseg5e16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x5_t test_vlsseg5e16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e16_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e32.c index 6dabc54e9..52927a6eb 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e32.c @@ -1,103 +1,150 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e32_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t
test_vlsseg5e32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tu(vint32mf2x5_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e32_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tu(vuint32mf2x5_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tu(vuint32m1x5_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e32_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_tumu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x5_t 
test_vlsseg5e32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e32_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x5_t test_vlsseg5e32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x5_t test_vlsseg5e32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x5_t test_vlsseg5e32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vlsseg5e32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x5_t test_vlsseg5e32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x5_t test_vlsseg5e32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x5_t test_vlsseg5e32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e32_v_u32m1x5_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e64.c index f00726ec6..109f296ab 100644 
--- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e64.c @@ -1,55 +1,78 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tu(vfloat64m1x5_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tu(vfloat64m1x5_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e64_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e64_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tu(vuint64m1x5_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e64_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e64_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e64_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e64_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e64_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e64_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e64_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x5_t test_vlsseg5e64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return
__riscv_vlsseg5e64_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vlsseg5e64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x5_t test_vlsseg5e64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e64_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x5_t test_vlsseg5e64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e64_v_u64m1x5_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e8.c index 7f44ffec4..4f457fb24 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg5e8.c @@ -5,130 +5,186 @@ #include <riscv_vector.h> -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e8_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e8_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e8_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e8_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e8_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e8_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e8_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg5e8_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tum(vbool32_t
vm, vint8mf4x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_tumu(vbool64_t vm, 
vuint8mf8x5_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x5_t test_vlsseg5e8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x5_t test_vlsseg5e8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x5_t test_vlsseg5e8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vlsseg5e8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x5_t test_vlsseg5e8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x5_t test_vlsseg5e8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x5_t test_vlsseg5e8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x5_t test_vlsseg5e8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x5_t test_vlsseg5e8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg5e8_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } diff 
--git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e16.c index d18a0f3f0..0fd6217af 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e16.c @@ -1,151 +1,224 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_f16mf4x6_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_f16mf2x6_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tu(vfloat16m1x6_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tu(vfloat16m1x6_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_f16m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tu(vint16mf4x6_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tu(vint16mf2x6_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tu(vuint16mf4x6_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tu(vuint16mf2x6_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tu(vuint16m1x6_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return
__riscv_vlsseg6e16_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_tumu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_tumu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 
*rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x6_t test_vlsseg6e16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x6_t test_vlsseg6e16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e16_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x6_t test_vlsseg6e16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x6_t test_vlsseg6e16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x6_t test_vlsseg6e16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t 
vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vlsseg6e16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x6_t test_vlsseg6e16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x6_t test_vlsseg6e16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x6_t test_vlsseg6e16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x6_t test_vlsseg6e16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e16_v_u16m1x6_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e32.c index f5f814c05..e0285e150 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e32.c @@ -1,103 +1,150 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_f32mf2x6_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e32_v_f32m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tu(vint32mf2x6_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e32_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tu(vuint32mf2x6_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tu(vuint32m1x6_t vd, const
uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tu(vuint32m1x6_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e32_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_tumu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, 
ptrdiff_t rs2, size_t vl) { +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e32_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x6_t test_vlsseg6e32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x6_t test_vlsseg6e32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x6_t test_vlsseg6e32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vlsseg6e32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x6_t test_vlsseg6e32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x6_t test_vlsseg6e32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x6_t test_vlsseg6e32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e32_v_u32m1x6_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e64.c index 9549430d1..f5e86e1a4 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e64.c @@ -1,55 +1,78 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tu(vfloat64m1x6_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tu(vfloat64m1x6_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e64_v_f64m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x6_t
test_vlsseg6e64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e64_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tu(vuint64m1x6_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e64_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e64_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e64_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e64_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e64_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e64_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e64_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x6_t test_vlsseg6e64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e64_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vlsseg6e64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x6_t test_vlsseg6e64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e64_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x6_t test_vlsseg6e64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e64_v_u64m1x6_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e8.c index 9b4a358e4..2ef419479 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e8.c +++ 
b/auto-generated/policy_funcs/llvm-api-tests/vlsseg6e8.c @@ -5,130 +5,186 @@ #include <riscv_vector.h> -vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e8_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e8_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e8_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e8_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e8_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e8_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e8_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg6e8_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t
test_vlsseg6e8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t 
vl) { +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x6_t test_vlsseg6e8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x6_t test_vlsseg6e8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x6_t test_vlsseg6e8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vlsseg6e8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x6_t test_vlsseg6e8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x6_t test_vlsseg6e8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x6_t test_vlsseg6e8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x6_t test_vlsseg6e8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x6_t test_vlsseg6e8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg6e8_v_u8m1x6_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e16.c index cf1e9c260..e7af8989e 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e16.c @@ -1,151 +1,224 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return
__riscv_vlsseg7e16_v_f16mf4x7_tu(vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_f16mf2x7_tu(vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tu(vfloat16m1x7_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tu(vfloat16m1x7_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_f16m1x7_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tu(vint16mf4x7_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_i16mf4x7_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tu(vint16mf2x7_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_i16mf2x7_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_i16m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tu(vuint16mf4x7_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_u16mf4x7_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tu(vuint16mf2x7_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tu(vuint16m1x7_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_f16m1x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t 
vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_i16m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_u16m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_tumu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_tumu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vlsseg7e16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x7_t 
test_vlsseg7e16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x7_t test_vlsseg7e16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x7_t test_vlsseg7e16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e16_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x7_t test_vlsseg7e16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_f16m1x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x7_t test_vlsseg7e16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x7_t test_vlsseg7e16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vlsseg7e16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x7_t test_vlsseg7e16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_i16m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x7_t test_vlsseg7e16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { 
+vuint16mf2x7_t test_vlsseg7e16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x7_t test_vlsseg7e16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e16_v_u16m1x7_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e32.c index 74c6b4416..3e888e747 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e32.c @@ -1,103 +1,150 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_f32mf2x7_tu(vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e32_v_f32m1x7_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tu(vint32mf2x7_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_i32mf2x7_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e32_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tu(vuint32mf2x7_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tu(vuint32m1x7_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_f32m1x7_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t
test_vlsseg7e32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_i32m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e32_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_u32m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_tumu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e32_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x7_t test_vlsseg7e32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl); } 
-vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x7_t test_vlsseg7e32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_f32m1x7_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x7_t test_vlsseg7e32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vlsseg7e32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x7_t test_vlsseg7e32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_i32m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x7_t test_vlsseg7e32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x7_t test_vlsseg7e32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e32_v_u32m1x7_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e64.c index 913cd8518..d451be79a 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e64.c @@ -1,55 +1,78 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tu(vfloat64m1x7_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tu(vfloat64m1x7_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e64_v_f64m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e64_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tu(vuint64m1x7_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e64_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e64_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x7_t
test_vlsseg7e64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e64_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e64_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e64_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e64_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e64_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x7_t test_vlsseg7e64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e64_v_f64m1x7_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vlsseg7e64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x7_t test_vlsseg7e64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e64_v_i64m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x7_t test_vlsseg7e64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e64_v_u64m1x7_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e8.c index ff635dcdc..c206ae290 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg7e8.c @@ -5,130 +5,186 @@ #include <riscv_vector.h> -vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e8_v_i8mf8x7_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e8_v_i8mf4x7_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e8_v_i8mf2x7_tu(vd, rs1, rs2, vl); } -vint8m1x7_t
test_vlsseg7e8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e8_v_i8m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e8_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e8_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e8_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg7e8_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t 
test_vlsseg7e8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x7_t test_vlsseg7e8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x7_t test_vlsseg7e8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vlsseg7e8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x7_t 
test_vlsseg7e8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vlsseg7e8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x7_t test_vlsseg7e8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_i8m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x7_t test_vlsseg7e8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x7_t test_vlsseg7e8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x7_t test_vlsseg7e8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x7_t test_vlsseg7e8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg7e8_v_u8m1x7_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e16.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e16.c index 93402af40..33e20d89b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e16.c @@ -1,151 +1,224 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tu(vfloat16m1x8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tu(vfloat16m1x8_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tu(vint16mf4x8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t
test_vlsseg8e16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tu(vint16mf2x8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tu(vuint16mf4x8_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tu(vuint16mf2x8_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tu(vuint16m1x8_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x8_t 
test_vlsseg8e16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_tumu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_tumu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const 
uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf4x8_t test_vlsseg8e16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16mf2x8_t test_vlsseg8e16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, + const _Float16 *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e16_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, ptrdiff_t rs2, size_t vl) { +vfloat16m1x8_t test_vlsseg8e16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf4x8_t test_vlsseg8e16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16mf2x8_t test_vlsseg8e16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vlsseg8e16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, ptrdiff_t rs2, size_t vl) { +vint16m1x8_t test_vlsseg8e16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf4x8_t test_vlsseg8e16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16mf2x8_t test_vlsseg8e16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint16m1x8_t test_vlsseg8e16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e16_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e32.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e32.c index 869989f1c..8f35c3615 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e32.c @@ -1,103 +1,150 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh 
\ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e32_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tu(vint32mf2x8_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e32_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tu(vuint32mf2x8_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tu(vuint32m1x8_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e32_v_u32mf2x8_tum(vm, vd, rs1, rs2, 
vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_tumu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e32_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32mf2x8_t test_vlsseg8e32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, ptrdiff_t rs2, size_t vl) { +vfloat32m1x8_t test_vlsseg8e32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32mf2x8_t test_vlsseg8e32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vlsseg8e32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, ptrdiff_t rs2, size_t vl) { +vint32m1x8_t test_vlsseg8e32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } 
-vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32mf2x8_t test_vlsseg8e32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint32m1x8_t test_vlsseg8e32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e32_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e64.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e64.c index b56e01673..6294317ae 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e64.c @@ -1,55 +1,78 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tu(vfloat64m1x8_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tu(vfloat64m1x8_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e64_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e64_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tu(vuint64m1x8_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e64_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e64_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e64_v_i64m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e64_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e64_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { 
+vint64m1x8_t test_vlsseg8e64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e64_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e64_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, ptrdiff_t rs2, size_t vl) { +vfloat64m1x8_t test_vlsseg8e64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e64_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vlsseg8e64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, ptrdiff_t rs2, size_t vl) { +vint64m1x8_t test_vlsseg8e64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e64_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint64m1x8_t test_vlsseg8e64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e64_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e8.c b/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e8.c index 6f2e63124..8a833e78d 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vlsseg8e8.c @@ -5,130 +5,186 @@ #include <riscv_vector.h> -vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e8_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e8_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e8_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e8_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e8_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e8_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x8_t 
test_vlsseg8e8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e8_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, + ptrdiff_t rs2, size_t vl) { return __riscv_vlsseg8e8_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_u8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t 
test_vlsseg8e8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf8x8_t test_vlsseg8e8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf4x8_t test_vlsseg8e8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8mf2x8_t test_vlsseg8e8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vlsseg8e8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, ptrdiff_t rs2, size_t vl) { +vint8m1x8_t test_vlsseg8e8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf8x8_t test_vlsseg8e8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vlsseg8e8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf4x8_t 
test_vlsseg8e8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8mf2x8_t test_vlsseg8e8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, ptrdiff_t rs2, size_t vl) { +vuint8m1x8_t test_vlsseg8e8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, ptrdiff_t rs2, + size_t vl) { return __riscv_vlsseg8e8_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxei16.c b/auto-generated/policy_funcs/llvm-api-tests/vluxei16.c index 5c345894f..9e4ac73c4 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxei16.c @@ -1,919 +1,1312 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vluxei16_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei16_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxei16_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei16_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxei16_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1_t test_vluxei16_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxei16_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei16_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2_t test_vluxei16_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4_t test_vluxei16_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxei16_v_f16m4_tu(vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei16_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, vuint16m8_t rs2, size_t vl) { +vfloat16m8_t test_vluxei16_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vluxei16_v_f16m8_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei16_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxei16_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1_t test_vluxei16_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, + vuint16mf2_t 
rs2, size_t vl) { return __riscv_vluxei16_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2_t test_vluxei16_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxei16_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4_t test_vluxei16_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, vuint16m4_t rs2, size_t vl) { +vfloat32m8_t test_vluxei16_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxei16_v_f32m8_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1_t test_vluxei16_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxei16_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2_t test_vluxei16_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxei16_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4_t test_vluxei16_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxei16_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, vuint16m2_t rs2, size_t vl) { +vfloat64m8_t test_vluxei16_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8_t test_vluxei16_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxei16_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4_t test_vluxei16_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxei16_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2_t test_vluxei16_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxei16_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vluxei16_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1_t test_vluxei16_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_i8m1_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vluxei16_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2_t test_vluxei16_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxei16_v_i8m2_tu(vd, rs1, rs2, vl); } -vint8m4_t test_vluxei16_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4_t test_vluxei16_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vluxei16_v_i8m4_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei16_v_i16mf4_tu(vint16mf4_t vd, const 
int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4_t test_vluxei16_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxei16_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2_t test_vluxei16_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxei16_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1_t test_vluxei16_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxei16_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2_t test_vluxei16_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_i16m2_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4_t test_vluxei16_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxei16_v_i16m4_tu(vd, rs1, rs2, vl); } -vint16m8_t test_vluxei16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { +vint16m8_t test_vluxei16_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vluxei16_v_i16m8_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2_t test_vluxei16_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxei16_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1_t test_vluxei16_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxei16_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2_t test_vluxei16_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxei16_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vluxei16_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4_t test_vluxei16_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_i32m4_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { +vint32m8_t test_vluxei16_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxei16_v_i32m8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1_t test_vluxei16_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxei16_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2_t test_vluxei16_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxei16_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4_t test_vluxei16_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, + 
vuint16m1_t rs2, size_t vl) { return __riscv_vluxei16_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { +vint64m8_t test_vluxei16_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8_t test_vluxei16_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxei16_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4_t test_vluxei16_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxei16_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2_t test_vluxei16_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxei16_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei16_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1_t test_vluxei16_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2_t test_vluxei16_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxei16_v_u8m2_tu(vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4_t test_vluxei16_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vluxei16_v_u8m4_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4_t test_vluxei16_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxei16_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2_t test_vluxei16_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxei16_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1_t test_vluxei16_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxei16_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2_t test_vluxei16_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4_t test_vluxei16_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxei16_v_u16m4_tu(vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint16m8_t test_vluxei16_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vluxei16_v_u16m8_tu(vd, rs1, rs2, vl); } 
-vuint32mf2_t test_vluxei16_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2_t test_vluxei16_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxei16_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1_t test_vluxei16_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxei16_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2_t test_vluxei16_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxei16_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4_t test_vluxei16_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint32m8_t test_vluxei16_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxei16_v_u32m8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei16_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1_t test_vluxei16_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxei16_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2_t test_vluxei16_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxei16_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4_t test_vluxei16_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxei16_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint64m8_t test_vluxei16_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei16_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei16_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1_t test_vluxei16_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei16_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2_t test_vluxei16_v_f16m2_tum(vbool8_t vm, 
vfloat16m2_t vd, + const _Float16 *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4_t test_vluxei16_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16m4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, vuint16m8_t rs2, size_t vl) { +vfloat16m8_t test_vluxei16_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16m8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei16_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1_t test_vluxei16_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2_t test_vluxei16_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4_t test_vluxei16_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint16m4_t rs2, size_t vl) { +vfloat32m8_t test_vluxei16_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32m8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1_t test_vluxei16_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2_t test_vluxei16_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4_t test_vluxei16_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint16m2_t rs2, size_t vl) { +vfloat64m8_t test_vluxei16_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8_tum(vbool64_t vm, vint8mf8_t 
vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8_t test_vluxei16_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4_t test_vluxei16_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2_t test_vluxei16_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei16_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1_t test_vluxei16_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei16_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2_t test_vluxei16_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxei16_v_i8m2_tum(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei16_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4_t test_vluxei16_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vluxei16_v_i8m4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4_t test_vluxei16_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2_t test_vluxei16_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1_t test_vluxei16_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2_t test_vluxei16_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4_t test_vluxei16_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16m4_tum(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { +vint16m8_t test_vluxei16_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16m8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2_tum(vbool64_t vm, 
vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2_t test_vluxei16_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1_t test_vluxei16_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2_t test_vluxei16_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei16_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4_t test_vluxei16_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { +vint32m8_t test_vluxei16_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32m8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1_t test_vluxei16_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2_t test_vluxei16_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4_t test_vluxei16_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { +vint64m8_t test_vluxei16_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8_t test_vluxei16_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4_t test_vluxei16_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2_t test_vluxei16_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8mf2_tum(vm, vd, rs1, rs2, 
vl); } -vuint8m1_t test_vluxei16_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1_t test_vluxei16_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2_t test_vluxei16_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8m2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4_t test_vluxei16_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8m4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4_t test_vluxei16_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2_t test_vluxei16_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1_t test_vluxei16_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2_t test_vluxei16_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4_t test_vluxei16_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16m4_tum(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint16m8_t test_vluxei16_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16m8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei16_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2_t test_vluxei16_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1_t test_vluxei16_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2_t test_vluxei16_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + const 
uint32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4_t test_vluxei16_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint32m8_t test_vluxei16_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei16_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1_t test_vluxei16_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2_t test_vluxei16_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4_t test_vluxei16_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint64m8_t test_vluxei16_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei16_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei16_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1_t test_vluxei16_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2_t test_vluxei16_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4_t test_vluxei16_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t 
test_vluxei16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, vuint16m8_t rs2, size_t vl) { +vfloat16m8_t test_vluxei16_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei16_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1_t test_vluxei16_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2_t test_vluxei16_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4_t test_vluxei16_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint16m4_t rs2, size_t vl) { +vfloat32m8_t test_vluxei16_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1_t test_vluxei16_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2_t test_vluxei16_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4_t test_vluxei16_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint16m2_t rs2, size_t vl) { +vfloat64m8_t test_vluxei16_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8_t test_vluxei16_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4_t test_vluxei16_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, 
+ const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2_t test_vluxei16_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei16_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1_t test_vluxei16_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei16_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2_t test_vluxei16_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei16_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4_t test_vluxei16_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + const int8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_i8m4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4_t test_vluxei16_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2_t test_vluxei16_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1_t test_vluxei16_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2_t test_vluxei16_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4_t test_vluxei16_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { +vint16m8_t test_vluxei16_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16m8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2_t test_vluxei16_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1_t 
test_vluxei16_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2_t test_vluxei16_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei16_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4_t test_vluxei16_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { +vint32m8_t test_vluxei16_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1_t test_vluxei16_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2_t test_vluxei16_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4_t test_vluxei16_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { +vint64m8_t test_vluxei16_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8_t test_vluxei16_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4_t test_vluxei16_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2_t test_vluxei16_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei16_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1_t test_vluxei16_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2_tumu(vbool4_t vm, 
vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2_t test_vluxei16_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4_t test_vluxei16_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4_t test_vluxei16_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2_t test_vluxei16_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1_t test_vluxei16_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2_t test_vluxei16_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4_t test_vluxei16_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint16m8_t test_vluxei16_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16m8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei16_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2_t test_vluxei16_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1_t test_vluxei16_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2_t test_vluxei16_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4_t test_vluxei16_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, 
vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint32m8_t test_vluxei16_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei16_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1_t test_vluxei16_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2_t test_vluxei16_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4_t test_vluxei16_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint64m8_t test_vluxei16_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei16_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei16_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1_t test_vluxei16_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2_t test_vluxei16_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4_t test_vluxei16_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, vuint16m8_t rs2, size_t vl) { +vfloat16m8_t test_vluxei16_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_f16m8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei16_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, 
const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei16_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei16_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1_t test_vluxei16_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei16_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2_t test_vluxei16_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei16_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4_t test_vluxei16_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei16_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint16m4_t rs2, size_t vl) { +vfloat32m8_t test_vluxei16_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f32m8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei16_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1_t test_vluxei16_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei16_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2_t test_vluxei16_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei16_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4_t test_vluxei16_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei16_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint16m2_t rs2, size_t vl) { +vfloat64m8_t test_vluxei16_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei16_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8_t test_vluxei16_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei16_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4_t test_vluxei16_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei16_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2_t test_vluxei16_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t 
test_vluxei16_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1_t test_vluxei16_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxei16_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei16_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2_t test_vluxei16_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxei16_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei16_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4_t test_vluxei16_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vluxei16_v_i8m4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4_t test_vluxei16_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2_t test_vluxei16_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1_t test_vluxei16_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2_t test_vluxei16_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4_t test_vluxei16_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16m4_mu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint16m8_t rs2, size_t vl) { +vint16m8_t test_vluxei16_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_i16m8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei16_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2_t test_vluxei16_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei16_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1_t test_vluxei16_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei16_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2_t test_vluxei16_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t 
test_vluxei16_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4_t test_vluxei16_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei16_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint16m4_t rs2, size_t vl) { +vint32m8_t test_vluxei16_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei16_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1_t test_vluxei16_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei16_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2_t test_vluxei16_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei16_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4_t test_vluxei16_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei16_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint16m2_t rs2, size_t vl) { +vint64m8_t test_vluxei16_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei16_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8_t test_vluxei16_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei16_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4_t test_vluxei16_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei16_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2_t test_vluxei16_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei16_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1_t test_vluxei16_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei16_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2_t test_vluxei16_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei16_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4_t test_vluxei16_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_u8m4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t 
test_vluxei16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4_t test_vluxei16_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2_t test_vluxei16_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1_t test_vluxei16_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2_t test_vluxei16_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4_t test_vluxei16_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16m4_mu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint16m8_t test_vluxei16_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxei16_v_u16m8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei16_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2_t test_vluxei16_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei16_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1_t test_vluxei16_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei16_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2_t test_vluxei16_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei16_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4_t test_vluxei16_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei16_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint32m8_t test_vluxei16_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxei16_v_u32m8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei16_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1_t test_vluxei16_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint16mf4_t 
rs2, + size_t vl) { return __riscv_vluxei16_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei16_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2_t test_vluxei16_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei16_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4_t test_vluxei16_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxei16_v_u64m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei16_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint64m8_t test_vluxei16_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxei16_v_u64m8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxei32.c b/auto-generated/policy_funcs/llvm-api-tests/vluxei32.c index a89e67483..f0b62b910 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxei32.c @@ -1,839 +1,1199 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vluxei32_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei32_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxei32_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei32_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxei32_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei32_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1_t test_vluxei32_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxei32_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2_t test_vluxei32_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4_t test_vluxei32_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxei32_v_f16m4_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei32_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxei32_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1_t test_vluxei32_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxei32_v_f32m1_tu(vd, rs1, rs2, vl); }
-vfloat32m2_t test_vluxei32_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2_t test_vluxei32_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxei32_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei32_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4_t test_vluxei32_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, vuint32m8_t rs2, size_t vl) { +vfloat32m8_t test_vluxei32_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxei32_v_f32m8_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1_t test_vluxei32_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxei32_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei32_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2_t test_vluxei32_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxei32_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4_t test_vluxei32_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxei32_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, vuint32m4_t rs2, size_t vl) { +vfloat64m8_t test_vluxei32_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8_t test_vluxei32_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxei32_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4_t test_vluxei32_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxei32_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2_t test_vluxei32_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxei32_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1_t test_vluxei32_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_i8m1_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2_t test_vluxei32_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxei32_v_i8m2_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei32_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4_t test_vluxei32_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxei32_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei32_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2_t 
test_vluxei32_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxei32_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1_t test_vluxei32_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxei32_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2_t test_vluxei32_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_i16m2_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4_t test_vluxei32_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxei32_v_i16m4_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2_t test_vluxei32_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxei32_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1_t test_vluxei32_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxei32_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2_t test_vluxei32_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxei32_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vluxei32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4_t test_vluxei32_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_i32m4_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { +vint32m8_t test_vluxei32_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxei32_v_i32m8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1_t test_vluxei32_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxei32_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2_t test_vluxei32_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxei32_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4_t test_vluxei32_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxei32_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vluxei32_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { +vint64m8_t test_vluxei32_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8_t test_vluxei32_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return 
__riscv_vluxei32_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4_t test_vluxei32_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxei32_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2_t test_vluxei32_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxei32_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei32_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1_t test_vluxei32_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei32_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2_t test_vluxei32_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxei32_v_u8m2_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4_t test_vluxei32_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxei32_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2_t test_vluxei32_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxei32_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1_t test_vluxei32_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxei32_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2_t test_vluxei32_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4_t test_vluxei32_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxei32_v_u16m4_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2_t test_vluxei32_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxei32_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1_t test_vluxei32_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxei32_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2_t test_vluxei32_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxei32_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4_t test_vluxei32_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint32m8_t 
test_vluxei32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint32m8_t test_vluxei32_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxei32_v_u32m8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1_t test_vluxei32_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxei32_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2_t test_vluxei32_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxei32_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4_t test_vluxei32_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxei32_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint64m8_t test_vluxei32_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei32_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei32_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei32_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei32_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1_t test_vluxei32_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2_t test_vluxei32_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4_t test_vluxei32_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei32_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1_t test_vluxei32_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei32_v_f32m2_tum(vbool16_t vm, 
vfloat32m2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2_t test_vluxei32_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4_t test_vluxei32_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint32m8_t rs2, size_t vl) { +vfloat32m8_t test_vluxei32_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32m8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1_t test_vluxei32_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei32_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2_t test_vluxei32_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4_t test_vluxei32_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint32m4_t rs2, size_t vl) { +vfloat64m8_t test_vluxei32_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8_t test_vluxei32_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4_t test_vluxei32_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2_t test_vluxei32_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1_t test_vluxei32_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2_t test_vluxei32_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxei32_v_i8m2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t 
test_vluxei32_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4_t test_vluxei32_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei32_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2_t test_vluxei32_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1_t test_vluxei32_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2_t test_vluxei32_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4_t test_vluxei32_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16m4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2_t test_vluxei32_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1_t test_vluxei32_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2_t test_vluxei32_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4_t test_vluxei32_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { +vint32m8_t test_vluxei32_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32m8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1_t test_vluxei32_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2_t test_vluxei32_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint32m1_t rs2, + size_t vl) { return 
__riscv_vluxei32_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4_t test_vluxei32_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei32_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { +vint64m8_t test_vluxei32_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8_t test_vluxei32_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4_t test_vluxei32_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2_t test_vluxei32_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei32_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1_t test_vluxei32_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei32_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2_t test_vluxei32_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8m2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4_t test_vluxei32_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2_t test_vluxei32_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1_t test_vluxei32_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2_t test_vluxei32_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4_t test_vluxei32_v_u16m4_tum(vbool4_t vm, 
vuint16m4_t vd, + const uint16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16m4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2_t test_vluxei32_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1_t test_vluxei32_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2_t test_vluxei32_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4_t test_vluxei32_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint32m8_t test_vluxei32_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1_t test_vluxei32_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2_t test_vluxei32_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4_t test_vluxei32_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint64m8_t test_vluxei32_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei32_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei32_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei32_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t 
test_vluxei32_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1_t test_vluxei32_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2_t test_vluxei32_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4_t test_vluxei32_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei32_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1_t test_vluxei32_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2_t test_vluxei32_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4_t test_vluxei32_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint32m8_t rs2, size_t vl) { +vfloat32m8_t test_vluxei32_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1_t test_vluxei32_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei32_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2_t test_vluxei32_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4_t test_vluxei32_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint32m4_t rs2, size_t vl) { +vfloat64m8_t test_vluxei32_v_f64m8_tumu(vbool8_t vm, 
vfloat64m8_t vd, + const double *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8_t test_vluxei32_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4_t test_vluxei32_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2_t test_vluxei32_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1_t test_vluxei32_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2_t test_vluxei32_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei32_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4_t test_vluxei32_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei32_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2_t test_vluxei32_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1_t test_vluxei32_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2_t test_vluxei32_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4_t test_vluxei32_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2_t test_vluxei32_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, 
size_t vl) { +vint32m1_t test_vluxei32_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2_t test_vluxei32_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4_t test_vluxei32_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { +vint32m8_t test_vluxei32_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1_t test_vluxei32_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2_t test_vluxei32_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4_t test_vluxei32_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei32_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { +vint64m8_t test_vluxei32_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8_t test_vluxei32_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4_t test_vluxei32_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2_t test_vluxei32_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei32_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1_t test_vluxei32_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t 
test_vluxei32_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2_t test_vluxei32_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4_t test_vluxei32_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2_t test_vluxei32_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1_t test_vluxei32_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2_t test_vluxei32_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4_t test_vluxei32_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2_t test_vluxei32_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1_t test_vluxei32_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2_t test_vluxei32_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4_t test_vluxei32_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint32m8_t test_vluxei32_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1_t test_vluxei32_v_u64m1_tumu(vbool64_t 
vm, vuint64m1_t vd, + const uint64_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2_t test_vluxei32_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4_t test_vluxei32_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint64m8_t test_vluxei32_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei32_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei32_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei32_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei32_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei32_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1_t test_vluxei32_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei32_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2_t test_vluxei32_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei32_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4_t test_vluxei32_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei32_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1_t test_vluxei32_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2_t test_vluxei32_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t 
test_vluxei32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4_t test_vluxei32_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint32m8_t rs2, size_t vl) { +vfloat32m8_t test_vluxei32_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_f32m8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei32_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1_t test_vluxei32_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei32_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2_t test_vluxei32_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei32_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4_t test_vluxei32_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei32_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint32m4_t rs2, size_t vl) { +vfloat64m8_t test_vluxei32_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei32_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8_t test_vluxei32_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei32_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4_t test_vluxei32_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei32_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2_t test_vluxei32_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei32_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1_t test_vluxei32_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxei32_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei32_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2_t test_vluxei32_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxei32_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei32_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4_t test_vluxei32_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } 
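The `_mu` functions in the hunks above exercise the mask-undisturbed policy variants: lanes whose mask bit in `vm` is clear keep their previous value from `vd` instead of being overwritten. A minimal usage sketch of one of the intrinsics exercised here, not part of the generated test suite (the function name, buffer, and offsets are illustrative assumptions):

#include <riscv_vector.h>

// Gather 32-bit floats from base + byte_offsets[i] for each active lane.
// vluxei32 takes unsigned 32-bit *byte* offsets in its index vector; with
// the _mu policy, lanes masked off by vm keep their old value from vd.
vfloat32m1_t gather_f32_masked(vbool32_t vm, vfloat32m1_t vd,
                               const float *base, vuint32m1_t byte_offsets,
                               size_t vl) {
  return __riscv_vluxei32_v_f32m1_mu(vm, vd, base, byte_offsets, vl);
}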
-vint16mf2_t test_vluxei32_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2_t test_vluxei32_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei32_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1_t test_vluxei32_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei32_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2_t test_vluxei32_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei32_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4_t test_vluxei32_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_i16m4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2_t test_vluxei32_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1_t test_vluxei32_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2_t test_vluxei32_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4_t test_vluxei32_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint32m8_t rs2, size_t vl) { +vint32m8_t test_vluxei32_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei32_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1_t test_vluxei32_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei32_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2_t test_vluxei32_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei32_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4_t test_vluxei32_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_i64m4_mu(vm, vd, rs1, rs2, 
vl); } -vint64m8_t test_vluxei32_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint32m4_t rs2, size_t vl) { +vint64m8_t test_vluxei32_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei32_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8_t test_vluxei32_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei32_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4_t test_vluxei32_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei32_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2_t test_vluxei32_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei32_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1_t test_vluxei32_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei32_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2_t test_vluxei32_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei32_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4_t test_vluxei32_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei32_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2_t test_vluxei32_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei32_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1_t test_vluxei32_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei32_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2_t test_vluxei32_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei32_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4_t test_vluxei32_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_u16m4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2_t test_vluxei32_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint32mf2_t rs2, + size_t vl) 
{ return __riscv_vluxei32_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1_t test_vluxei32_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2_t test_vluxei32_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4_t test_vluxei32_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint32m8_t test_vluxei32_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxei32_v_u32m8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei32_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1_t test_vluxei32_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei32_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2_t test_vluxei32_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxei32_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei32_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4_t test_vluxei32_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxei32_v_u64m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei32_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint64m8_t test_vluxei32_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxei32_v_u64m8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxei64.c b/auto-generated/policy_funcs/llvm-api-tests/vluxei64.c index 612861828..c22a2f4ec 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxei64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxei64.c @@ -1,711 +1,1017 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vluxei64_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei64_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxei64_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2_t
test_vluxei64_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxei64_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1_t test_vluxei64_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxei64_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2_t test_vluxei64_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei64_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxei64_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1_t test_vluxei64_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxei64_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2_t test_vluxei64_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxei64_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4_t test_vluxei64_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1_t test_vluxei64_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxei64_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2_t test_vluxei64_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxei64_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4_t test_vluxei64_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxei64_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, vuint64m8_t rs2, size_t vl) { +vfloat64m8_t test_vluxei64_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8_t test_vluxei64_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxei64_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4_t test_vluxei64_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxei64_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2_t test_vluxei64_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, + vuint64m4_t rs2, 
size_t vl) { return __riscv_vluxei64_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1_t test_vluxei64_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_i8m1_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4_t test_vluxei64_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxei64_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2_t test_vluxei64_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxei64_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1_t test_vluxei64_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxei64_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2_t test_vluxei64_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_i16m2_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2_t test_vluxei64_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxei64_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1_t test_vluxei64_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxei64_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2_t test_vluxei64_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxei64_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4_t test_vluxei64_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_i32m4_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1_t test_vluxei64_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxei64_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2_t test_vluxei64_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxei64_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4_t test_vluxei64_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxei64_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { +vint64m8_t test_vluxei64_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t 
*rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8_t test_vluxei64_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxei64_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4_t test_vluxei64_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxei64_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2_t test_vluxei64_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxei64_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1_t test_vluxei64_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4_t test_vluxei64_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxei64_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2_t test_vluxei64_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxei64_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1_t test_vluxei64_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxei64_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2_t test_vluxei64_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2_t test_vluxei64_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxei64_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1_t test_vluxei64_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxei64_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2_t test_vluxei64_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxei64_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4_t test_vluxei64_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1_t test_vluxei64_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxei64_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2_t 
test_vluxei64_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxei64_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4_t test_vluxei64_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxei64_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint64m8_t test_vluxei64_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei64_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei64_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei64_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1_t test_vluxei64_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2_t test_vluxei64_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei64_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1_t test_vluxei64_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2_t test_vluxei64_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4_t test_vluxei64_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1_t test_vluxei64_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const double *rs1, 
vuint64m2_t rs2, size_t vl) { +vfloat64m2_t test_vluxei64_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4_t test_vluxei64_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint64m8_t rs2, size_t vl) { +vfloat64m8_t test_vluxei64_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8_t test_vluxei64_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4_t test_vluxei64_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2_t test_vluxei64_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1_t test_vluxei64_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4_t test_vluxei64_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2_t test_vluxei64_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1_t test_vluxei64_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2_t test_vluxei64_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2_t test_vluxei64_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t 
test_vluxei64_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1_t test_vluxei64_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2_t test_vluxei64_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4_t test_vluxei64_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1_t test_vluxei64_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2_t test_vluxei64_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4_t test_vluxei64_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { +vint64m8_t test_vluxei64_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8_t test_vluxei64_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4_t test_vluxei64_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2_t test_vluxei64_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1_t test_vluxei64_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4_t test_vluxei64_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint64m1_t rs2, + size_t vl) { return 
__riscv_vluxei64_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2_t test_vluxei64_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1_t test_vluxei64_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2_t test_vluxei64_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2_t test_vluxei64_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1_t test_vluxei64_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2_t test_vluxei64_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4_t test_vluxei64_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1_t test_vluxei64_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2_t test_vluxei64_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4_t test_vluxei64_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint64m8_t test_vluxei64_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei64_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { 
+vfloat16mf4_t test_vluxei64_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei64_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1_t test_vluxei64_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2_t test_vluxei64_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei64_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1_t test_vluxei64_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2_t test_vluxei64_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4_t test_vluxei64_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1_t test_vluxei64_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_f64m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2_t test_vluxei64_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4_t test_vluxei64_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint64m8_t rs2, size_t vl) { +vfloat64m8_t test_vluxei64_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint64m8_t rs2, + size_t vl) { return 
__riscv_vluxei64_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8_t test_vluxei64_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4_t test_vluxei64_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2_t test_vluxei64_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1_t test_vluxei64_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4_t test_vluxei64_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2_t test_vluxei64_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_i16mf2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1_t test_vluxei64_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2_t test_vluxei64_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2_t test_vluxei64_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1_t test_vluxei64_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2_t test_vluxei64_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4_t test_vluxei64_v_i32m4_tumu(vbool8_t vm, 
vint32m4_t vd, + const int32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1_t test_vluxei64_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2_t test_vluxei64_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4_t test_vluxei64_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { +vint64m8_t test_vluxei64_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8_t test_vluxei64_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4_t test_vluxei64_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2_t test_vluxei64_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1_t test_vluxei64_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4_t test_vluxei64_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2_t test_vluxei64_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1_t test_vluxei64_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2_tumu(vbool8_t vm, vuint16m2_t 
vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2_t test_vluxei64_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2_t test_vluxei64_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1_t test_vluxei64_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2_t test_vluxei64_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4_t test_vluxei64_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1_t test_vluxei64_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2_t test_vluxei64_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4_t test_vluxei64_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint64m8_t test_vluxei64_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei64_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei64_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei64_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei64_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei64_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1_t test_vluxei64_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, 
vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei64_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2_t test_vluxei64_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei64_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei64_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei64_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1_t test_vluxei64_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei64_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2_t test_vluxei64_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei64_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4_t test_vluxei64_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1_t test_vluxei64_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2_t test_vluxei64_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4_t test_vluxei64_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint64m8_t rs2, size_t vl) { +vfloat64m8_t test_vluxei64_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei64_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8_t test_vluxei64_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei64_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4_t test_vluxei64_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei64_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2_t 
test_vluxei64_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei64_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1_t test_vluxei64_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxei64_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei64_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4_t test_vluxei64_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei64_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2_t test_vluxei64_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei64_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1_t test_vluxei64_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei64_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2_t test_vluxei64_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei64_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2_t test_vluxei64_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei64_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1_t test_vluxei64_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei64_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2_t test_vluxei64_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei64_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4_t test_vluxei64_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1_t test_vluxei64_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2_t test_vluxei64_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4_t 
test_vluxei64_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint64m8_t rs2, size_t vl) { +vint64m8_t test_vluxei64_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei64_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8_t test_vluxei64_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei64_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4_t test_vluxei64_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei64_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2_t test_vluxei64_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei64_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1_t test_vluxei64_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei64_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4_t test_vluxei64_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei64_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2_t test_vluxei64_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei64_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1_t test_vluxei64_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei64_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2_t test_vluxei64_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei64_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2_t test_vluxei64_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei64_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1_t test_vluxei64_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei64_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, 
const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2_t test_vluxei64_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei64_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4_t test_vluxei64_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1_t test_vluxei64_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxei64_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2_t test_vluxei64_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxei64_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4_t test_vluxei64_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxei64_v_u64m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint64m8_t test_vluxei64_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxei64_v_u64m8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxei8.c b/auto-generated/policy_funcs/llvm-api-tests/vluxei8.c index 9cc4c0958..212bc1c77 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxei8.c @@ -1,951 +1,1352 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vluxei8_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei8_v_f16mf4_tu(vfloat16mf4_t vd, const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxei8_v_f16mf4_tu(vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei8_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei8_v_f16mf2_tu(vfloat16mf2_t vd, const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxei8_v_f16mf2_tu(vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei8_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1_t test_vluxei8_v_f16m1_tu(vfloat16m1_t vd, const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxei8_v_f16m1_tu(vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei8_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2_t test_vluxei8_v_f16m2_tu(vfloat16m2_t vd, const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_f16m2_tu(vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei8_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, vuint8m2_t
rs2, size_t vl) { +vfloat16m4_t test_vluxei8_v_f16m4_tu(vfloat16m4_t vd, const _Float16 *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxei8_v_f16m4_tu(vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei8_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, vuint8m4_t rs2, size_t vl) { +vfloat16m8_t test_vluxei8_v_f16m8_tu(vfloat16m8_t vd, const _Float16 *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vluxei8_v_f16m8_tu(vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei8_v_f32mf2_tu(vfloat32mf2_t vd, const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxei8_v_f32mf2_tu(vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1_t test_vluxei8_v_f32m1_tu(vfloat32m1_t vd, const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxei8_v_f32m1_tu(vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2_t test_vluxei8_v_f32m2_tu(vfloat32m2_t vd, const float *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxei8_v_f32m2_tu(vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4_t test_vluxei8_v_f32m4_tu(vfloat32m4_t vd, const float *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_f32m4_tu(vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, vuint8m2_t rs2, size_t vl) { +vfloat32m8_t test_vluxei8_v_f32m8_tu(vfloat32m8_t vd, const float *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxei8_v_f32m8_tu(vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1_t test_vluxei8_v_f64m1_tu(vfloat64m1_t vd, const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxei8_v_f64m1_tu(vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2_t test_vluxei8_v_f64m2_tu(vfloat64m2_t vd, const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxei8_v_f64m2_tu(vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei8_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4_t test_vluxei8_v_f64m4_tu(vfloat64m4_t vd, const double *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxei8_v_f64m4_tu(vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, vuint8m1_t rs2, size_t vl) { +vfloat64m8_t test_vluxei8_v_f64m8_tu(vfloat64m8_t vd, const double *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_f64m8_tu(vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8_t test_vluxei8_v_i8mf8_tu(vint8mf8_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxei8_v_i8mf8_tu(vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4_t test_vluxei8_v_i8mf4_tu(vint8mf4_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxei8_v_i8mf4_tu(vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2_t test_vluxei8_v_i8mf2_tu(vint8mf2_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return 
__riscv_vluxei8_v_i8mf2_tu(vd, rs1, rs2, vl); } -vint8m1_t test_vluxei8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1_t test_vluxei8_v_i8m1_tu(vint8m1_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m1_tu(vd, rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2_t test_vluxei8_v_i8m2_tu(vint8m2_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m2_tu(vd, rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4_t test_vluxei8_v_i8m4_tu(vint8m4_t vd, const int8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m4_tu(vd, rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { +vint8m8_t test_vluxei8_v_i8m8_tu(vint8m8_t vd, const int8_t *rs1, + vuint8m8_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m8_tu(vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4_t test_vluxei8_v_i16mf4_tu(vint16mf4_t vd, const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxei8_v_i16mf4_tu(vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2_t test_vluxei8_v_i16mf2_tu(vint16mf2_t vd, const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxei8_v_i16mf2_tu(vd, rs1, rs2, vl); } -vint16m1_t test_vluxei8_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1_t test_vluxei8_v_i16m1_tu(vint16m1_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxei8_v_i16m1_tu(vd, rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2_t test_vluxei8_v_i16m2_tu(vint16m2_t vd, const int16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_i16m2_tu(vd, rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4_t test_vluxei8_v_i16m4_tu(vint16m4_t vd, const int16_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxei8_v_i16m4_tu(vd, rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { +vint16m8_t test_vluxei8_v_i16m8_tu(vint16m8_t vd, const int16_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vluxei8_v_i16m8_tu(vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2_t test_vluxei8_v_i32mf2_tu(vint32mf2_t vd, const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxei8_v_i32mf2_tu(vd, rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1_t test_vluxei8_v_i32m1_tu(vint32m1_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxei8_v_i32m1_tu(vd, rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2_t test_vluxei8_v_i32m2_tu(vint32m2_t vd, const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxei8_v_i32m2_tu(vd, rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4_tu(vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4_t test_vluxei8_v_i32m4_tu(vint32m4_t vd, const int32_t 
*rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_i32m4_tu(vd, rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { +vint32m8_t test_vluxei8_v_i32m8_tu(vint32m8_t vd, const int32_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxei8_v_i32m8_tu(vd, rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1_t test_vluxei8_v_i64m1_tu(vint64m1_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxei8_v_i64m1_tu(vd, rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2_t test_vluxei8_v_i64m2_tu(vint64m2_t vd, const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxei8_v_i64m2_tu(vd, rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4_t test_vluxei8_v_i64m4_tu(vint64m4_t vd, const int64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxei8_v_i64m4_tu(vd, rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { +vint64m8_t test_vluxei8_v_i64m8_tu(vint64m8_t vd, const int64_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_i64m8_tu(vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8_t test_vluxei8_v_u8mf8_tu(vuint8mf8_t vd, const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxei8_v_u8mf8_tu(vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4_t test_vluxei8_v_u8mf4_tu(vuint8mf4_t vd, const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxei8_v_u8mf4_tu(vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2_t test_vluxei8_v_u8mf2_tu(vuint8mf2_t vd, const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxei8_v_u8mf2_tu(vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1_t test_vluxei8_v_u8m1_tu(vuint8m1_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_u8m1_tu(vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2_t test_vluxei8_v_u8m2_tu(vuint8m2_t vd, const uint8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxei8_v_u8m2_tu(vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4_t test_vluxei8_v_u8m4_tu(vuint8m4_t vd, const uint8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vluxei8_v_u8m4_tu(vd, rs1, rs2, vl); } -vuint8m8_t test_vluxei8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { +vuint8m8_t test_vluxei8_v_u8m8_tu(vuint8m8_t vd, const uint8_t *rs1, + vuint8m8_t rs2, size_t vl) { return __riscv_vluxei8_v_u8m8_tu(vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei8_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4_t test_vluxei8_v_u16mf4_tu(vuint16mf4_t vd, const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxei8_v_u16mf4_tu(vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei8_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, 
size_t vl) { +vuint16mf2_t test_vluxei8_v_u16mf2_tu(vuint16mf2_t vd, const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxei8_v_u16mf2_tu(vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei8_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1_t test_vluxei8_v_u16m1_tu(vuint16m1_t vd, const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxei8_v_u16m1_tu(vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei8_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2_t test_vluxei8_v_u16m2_tu(vuint16m2_t vd, const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_u16m2_tu(vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei8_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4_t test_vluxei8_v_u16m4_tu(vuint16m4_t vd, const uint16_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxei8_v_u16m4_tu(vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei8_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint16m8_t test_vluxei8_v_u16m8_tu(vuint16m8_t vd, const uint16_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vluxei8_v_u16m8_tu(vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei8_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2_t test_vluxei8_v_u32mf2_tu(vuint32mf2_t vd, const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxei8_v_u32mf2_tu(vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei8_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1_t test_vluxei8_v_u32m1_tu(vuint32m1_t vd, const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxei8_v_u32m1_tu(vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei8_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2_t test_vluxei8_v_u32m2_tu(vuint32m2_t vd, const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxei8_v_u32m2_tu(vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei8_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4_t test_vluxei8_v_u32m4_tu(vuint32m4_t vd, const uint32_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_u32m4_tu(vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei8_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint32m8_t test_vluxei8_v_u32m8_tu(vuint32m8_t vd, const uint32_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxei8_v_u32m8_tu(vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei8_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1_t test_vluxei8_v_u64m1_tu(vuint64m1_t vd, const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxei8_v_u64m1_tu(vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei8_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2_t test_vluxei8_v_u64m2_tu(vuint64m2_t vd, const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxei8_v_u64m2_tu(vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei8_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4_t test_vluxei8_v_u64m4_tu(vuint64m4_t vd, const uint64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxei8_v_u64m4_tu(vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei8_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint64m8_t test_vluxei8_v_u64m8_tu(vuint64m8_t vd, const uint64_t *rs1, + vuint8m1_t rs2, 
size_t vl) { return __riscv_vluxei8_v_u64m8_tu(vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei8_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4_t test_vluxei8_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16mf4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei8_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei8_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei8_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1_t test_vluxei8_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16m1_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei8_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2_t test_vluxei8_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16m2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei8_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4_t test_vluxei8_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16m4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei8_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, vuint8m4_t rs2, size_t vl) { +vfloat16m8_t test_vluxei8_v_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16m8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei8_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32mf2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1_t test_vluxei8_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32m1_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2_t test_vluxei8_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32m2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4_t test_vluxei8_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32m4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint8m2_t rs2, size_t vl) { +vfloat32m8_t test_vluxei8_v_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32m8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1_t 
test_vluxei8_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_f64m1_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2_t test_vluxei8_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_f64m2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei8_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4_t test_vluxei8_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f64m4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint8m1_t rs2, size_t vl) { +vfloat64m8_t test_vluxei8_v_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_f64m8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8_t test_vluxei8_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_i8mf8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4_t test_vluxei8_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i8mf4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2_t test_vluxei8_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i8mf2_tum(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1_t test_vluxei8_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m1_tum(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2_t test_vluxei8_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m2_tum(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4_t test_vluxei8_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m4_tum(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { +vint8m8_t test_vluxei8_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + vuint8m8_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4_t test_vluxei8_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16mf4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2_t test_vluxei8_v_i16mf2_tum(vbool32_t vm, 
vint16mf2_t vd, + const int16_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16mf2_tum(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei8_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1_t test_vluxei8_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16m1_tum(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2_t test_vluxei8_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16m2_tum(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4_t test_vluxei8_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16m4_tum(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { +vint16m8_t test_vluxei8_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16m8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2_t test_vluxei8_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32mf2_tum(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1_t test_vluxei8_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32m1_tum(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2_t test_vluxei8_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32m2_tum(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4_t test_vluxei8_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32m4_tum(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { +vint32m8_t test_vluxei8_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32m8_tum(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1_t test_vluxei8_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_i64m1_tum(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2_t test_vluxei8_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i64m2_tum(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4_t test_vluxei8_v_i64m4_tum(vbool16_t vm, vint64m4_t 
vd, + const int64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i64m4_tum(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { +vint64m8_t test_vluxei8_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_i64m8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8_t test_vluxei8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8mf8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4_t test_vluxei8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8mf4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2_t test_vluxei8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8mf2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1_t test_vluxei8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8m1_tum(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2_t test_vluxei8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8m2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4_t test_vluxei8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8m4_tum(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vluxei8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { +vuint8m8_t test_vluxei8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, + const uint8_t *rs1, vuint8m8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8m8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4_t test_vluxei8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16mf4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2_t test_vluxei8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16mf2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1_t test_vluxei8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16m1_tum(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2_t test_vluxei8_v_u16m2_tum(vbool8_t vm, 
vuint16m2_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16m2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4_t test_vluxei8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16m4_tum(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint16m8_t test_vluxei8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16m8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2_t test_vluxei8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32mf2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1_t test_vluxei8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32m1_tum(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2_t test_vluxei8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32m2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4_t test_vluxei8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32m4_tum(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint32m8_t test_vluxei8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32m8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1_t test_vluxei8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u64m1_tum(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2_t test_vluxei8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u64m2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4_t test_vluxei8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u64m4_tum(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint64m8_t test_vluxei8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_u64m8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei8_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, 
size_t vl) { +vfloat16mf4_t test_vluxei8_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16mf4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei8_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei8_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei8_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1_t test_vluxei8_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei8_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2_t test_vluxei8_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei8_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4_t test_vluxei8_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei8_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, vuint8m4_t rs2, size_t vl) { +vfloat16m8_t test_vluxei8_v_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei8_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32mf2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1_t test_vluxei8_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32m1_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2_t test_vluxei8_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4_t test_vluxei8_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint8m2_t rs2, size_t vl) { +vfloat32m8_t test_vluxei8_v_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1_t test_vluxei8_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_f64m1_tumu(vm, vd, rs1, 
rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2_t test_vluxei8_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_f64m2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei8_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4_t test_vluxei8_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f64m4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint8m1_t rs2, size_t vl) { +vfloat64m8_t test_vluxei8_v_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_f64m8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8_t test_vluxei8_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_i8mf8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4_t test_vluxei8_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i8mf4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2_t test_vluxei8_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i8mf2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1_t test_vluxei8_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m1_tumu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2_t test_vluxei8_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4_t test_vluxei8_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m4_tumu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { +vint8m8_t test_vluxei8_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + vuint8m8_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4_t test_vluxei8_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16mf4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2_t test_vluxei8_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16mf2_tumu(vm, vd, rs1, rs2, 
vl); } -vint16m1_t test_vluxei8_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1_t test_vluxei8_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16m1_tumu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2_t test_vluxei8_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16m2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4_t test_vluxei8_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16m4_tumu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { +vint16m8_t test_vluxei8_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16m8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2_t test_vluxei8_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32mf2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1_t test_vluxei8_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32m1_tumu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2_t test_vluxei8_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32m2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4_t test_vluxei8_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32m4_tumu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { +vint32m8_t test_vluxei8_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32m8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1_t test_vluxei8_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_i64m1_tumu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2_t test_vluxei8_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i64m2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4_t test_vluxei8_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return 
__riscv_vluxei8_v_i64m4_tumu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { +vint64m8_t test_vluxei8_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + const int64_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_i64m8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8_t test_vluxei8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8mf8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4_t test_vluxei8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2_t test_vluxei8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1_t test_vluxei8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8m1_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2_t test_vluxei8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8m2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4_t test_vluxei8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8m4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vluxei8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { +vuint8m8_t test_vluxei8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + const uint8_t *rs1, vuint8m8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8m8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4_t test_vluxei8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16mf4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2_t test_vluxei8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1_t test_vluxei8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16m1_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2_t test_vluxei8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + const 
uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16m2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4_t test_vluxei8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + const uint16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16m4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint16m8_t test_vluxei8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16m8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2_t test_vluxei8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32mf2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1_t test_vluxei8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32m1_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2_t test_vluxei8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32m2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4_t test_vluxei8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32m4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint32m8_t test_vluxei8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32m8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1_t test_vluxei8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u64m1_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2_t test_vluxei8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u64m2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4_t test_vluxei8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u64m4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint64m8_t test_vluxei8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_u64m8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4_t test_vluxei8_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, const _Float16 *rs1, vuint8mf8_t 
rs2, size_t vl) { +vfloat16mf4_t test_vluxei8_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + const _Float16 *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16mf4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2_t test_vluxei8_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2_t test_vluxei8_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + const _Float16 *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1_t test_vluxei8_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1_t test_vluxei8_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + const _Float16 *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16m1_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2_t test_vluxei8_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2_t test_vluxei8_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + const _Float16 *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16m2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4_t test_vluxei8_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4_t test_vluxei8_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + const _Float16 *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16m4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m8_t test_vluxei8_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, const _Float16 *rs1, vuint8m4_t rs2, size_t vl) { +vfloat16m8_t test_vluxei8_v_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + const _Float16 *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxei8_v_f16m8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2_t test_vluxei8_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2_t test_vluxei8_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + const float *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32mf2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1_t test_vluxei8_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1_t test_vluxei8_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32m1_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2_t test_vluxei8_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2_t test_vluxei8_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32m2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4_t test_vluxei8_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4_t test_vluxei8_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32m4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m8_t test_vluxei8_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, const float *rs1, vuint8m2_t rs2, size_t vl) { +vfloat32m8_t test_vluxei8_v_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + const float *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f32m8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1_t test_vluxei8_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1_t test_vluxei8_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_f64m1_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2_t test_vluxei8_v_f64m2_mu(vbool32_t vm, 
vfloat64m2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2_t test_vluxei8_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_f64m2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4_t test_vluxei8_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4_t test_vluxei8_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + const double *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_f64m4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m8_t test_vluxei8_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, const double *rs1, vuint8m1_t rs2, size_t vl) { +vfloat64m8_t test_vluxei8_v_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + const double *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_f64m8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8_t test_vluxei8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8_t test_vluxei8_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_i8mf8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4_t test_vluxei8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4_t test_vluxei8_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i8mf4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2_t test_vluxei8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2_t test_vluxei8_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i8mf2_mu(vm, vd, rs1, rs2, vl); } -vint8m1_t test_vluxei8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1_t test_vluxei8_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m1_mu(vm, vd, rs1, rs2, vl); } -vint8m2_t test_vluxei8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2_t test_vluxei8_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m2_mu(vm, vd, rs1, rs2, vl); } -vint8m4_t test_vluxei8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4_t test_vluxei8_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, const int8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m4_mu(vm, vd, rs1, rs2, vl); } -vint8m8_t test_vluxei8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, vuint8m8_t rs2, size_t vl) { +vint8m8_t test_vluxei8_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, const int8_t *rs1, + vuint8m8_t rs2, size_t vl) { return __riscv_vluxei8_v_i8m8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4_t test_vluxei8_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4_t test_vluxei8_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + const int16_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16mf4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2_t test_vluxei8_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2_t test_vluxei8_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + const int16_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16mf2_mu(vm, vd, rs1, rs2, vl); } -vint16m1_t test_vluxei8_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1_t 
test_vluxei8_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16m1_mu(vm, vd, rs1, rs2, vl); } -vint16m2_t test_vluxei8_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2_t test_vluxei8_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16m2_mu(vm, vd, rs1, rs2, vl); } -vint16m4_t test_vluxei8_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4_t test_vluxei8_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, + const int16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16m4_mu(vm, vd, rs1, rs2, vl); } -vint16m8_t test_vluxei8_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, const int16_t *rs1, vuint8m4_t rs2, size_t vl) { +vint16m8_t test_vluxei8_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, + const int16_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i16m8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2_t test_vluxei8_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2_t test_vluxei8_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + const int32_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32mf2_mu(vm, vd, rs1, rs2, vl); } -vint32m1_t test_vluxei8_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1_t test_vluxei8_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32m1_mu(vm, vd, rs1, rs2, vl); } -vint32m2_t test_vluxei8_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2_t test_vluxei8_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32m2_mu(vm, vd, rs1, rs2, vl); } -vint32m4_t test_vluxei8_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4_t test_vluxei8_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, + const int32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32m4_mu(vm, vd, rs1, rs2, vl); } -vint32m8_t test_vluxei8_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, const int32_t *rs1, vuint8m2_t rs2, size_t vl) { +vint32m8_t test_vluxei8_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, + const int32_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i32m8_mu(vm, vd, rs1, rs2, vl); } -vint64m1_t test_vluxei8_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1_t test_vluxei8_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_i64m1_mu(vm, vd, rs1, rs2, vl); } -vint64m2_t test_vluxei8_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2_t test_vluxei8_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_i64m2_mu(vm, vd, rs1, rs2, vl); } -vint64m4_t test_vluxei8_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4_t test_vluxei8_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + const int64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_i64m4_mu(vm, vd, rs1, rs2, vl); } -vint64m8_t test_vluxei8_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, const int64_t *rs1, vuint8m1_t rs2, size_t vl) { +vint64m8_t test_vluxei8_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, + 
const int64_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_i64m8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8_t test_vluxei8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8_t test_vluxei8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8mf8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4_t test_vluxei8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4_t test_vluxei8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8mf4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2_t test_vluxei8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2_t test_vluxei8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8mf2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1_t test_vluxei8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1_t test_vluxei8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8m1_mu(vm, vd, rs1, rs2, vl); } -vuint8m2_t test_vluxei8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2_t test_vluxei8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8m2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4_t test_vluxei8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4_t test_vluxei8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + const uint8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8m4_mu(vm, vd, rs1, rs2, vl); } -vuint8m8_t test_vluxei8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, const uint8_t *rs1, vuint8m8_t rs2, size_t vl) { +vuint8m8_t test_vluxei8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, + const uint8_t *rs1, vuint8m8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u8m8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4_t test_vluxei8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4_t test_vluxei8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + const uint16_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16mf4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2_t test_vluxei8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2_t test_vluxei8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + const uint16_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16mf2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1_t test_vluxei8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1_t test_vluxei8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + const uint16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16m1_mu(vm, vd, rs1, rs2, vl); } -vuint16m2_t test_vluxei8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2_t test_vluxei8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16m2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4_t test_vluxei8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4_t test_vluxei8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + const uint16_t 
*rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16m4_mu(vm, vd, rs1, rs2, vl); } -vuint16m8_t test_vluxei8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, const uint16_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint16m8_t test_vluxei8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + const uint16_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u16m8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2_t test_vluxei8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2_t test_vluxei8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + const uint32_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32mf2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1_t test_vluxei8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1_t test_vluxei8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + const uint32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32m1_mu(vm, vd, rs1, rs2, vl); } -vuint32m2_t test_vluxei8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2_t test_vluxei8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + const uint32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32m2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4_t test_vluxei8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4_t test_vluxei8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + const uint32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32m4_mu(vm, vd, rs1, rs2, vl); } -vuint32m8_t test_vluxei8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, const uint32_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint32m8_t test_vluxei8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + const uint32_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u32m8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1_t test_vluxei8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1_t test_vluxei8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + const uint64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxei8_v_u64m1_mu(vm, vd, rs1, rs2, vl); } -vuint64m2_t test_vluxei8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2_t test_vluxei8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + const uint64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxei8_v_u64m2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4_t test_vluxei8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4_t test_vluxei8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + const uint64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxei8_v_u64m4_mu(vm, vd, rs1, rs2, vl); } -vuint64m8_t test_vluxei8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, const uint64_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint64m8_t test_vluxei8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + const uint64_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxei8_v_u64m8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei16.c index 7dfa549f0..f07a5165f 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei16.c @@ -1,775 +1,1153 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh 
-disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tu(vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16mf4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tu(vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16mf2x2_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tu(vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16m1x2_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tu(vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16m2x2_tu(vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tu(vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16m4x2_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tu(vfloat32mf2x2_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f32mf2x2_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tu(vfloat32m1x2_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f32m1x2_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tu(vfloat32m2x2_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_f32m2x2_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tu(vfloat32m4x2_t vd, + const float *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_f32m4x2_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tu(vfloat64m1x2_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f64m1x2_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tu(vfloat64m2x2_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f64m2x2_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t vd,
const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tu(vfloat64m4x2_t vd, + const double *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f64m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i8mf8x2_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i8mf4x2_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i8m2x2_tu(vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i8m4x2_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tu(vint16mf4x2_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16mf4x2_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tu(vint16mf2x2_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16mf2x2_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16m1x2_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16m2x2_tu(vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16m4x2_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x2_t 
test_vluxseg2ei16_v_i32mf2x2_tu(vint32mf2x2_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32m2x2_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i64m1x2_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i64m2x2_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i64m4x2_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tu(vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tu(vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tu(vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8m2x2_tu(vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, + 
vuint16m8_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8m4x2_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tu(vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16mf4x2_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tu(vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16mf2x2_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tu(vuint16m1x2_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16m1x2_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tu(vuint16m2x2_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16m2x2_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tu(vuint16m4x2_t vd, + const uint16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16m4x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tu(vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tu(vuint32m1x2_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32m1x2_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tu(vuint32m2x2_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tu(vuint32m4x2_t vd, + const uint32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tu(vuint64m1x2_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tu(vuint64m2x2_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tu(vuint64m4x2_t 
vd, + const uint64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tum(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tum(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tum(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, 
const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8m2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8m4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t vm, 
vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16m2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16m4x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32m1x2_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t 
test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_u8m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_u8m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { 
return __riscv_vluxseg2ei16_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_tumu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_tumu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint16mf4_t rs2, 
size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_tumu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t 
test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, + vuint16m2_t rs2, size_t vl) { return 
__riscv_vluxseg2ei16_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, + vuint16m8_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_tumu(vbool64_t vm, + vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x2_t 
test_vluxseg2ei16_v_u16mf2x2_tumu(vbool32_t vm, + vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_tumu(vbool64_t vm, + vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } 
-vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei16_v_f16mf4x2_mu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei16_v_f16mf2x2_mu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei16_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei16_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint16m4_t rs2, size_t vl) { +vfloat16m4x2_t test_vluxseg2ei16_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f16m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei16_v_f32mf2x2_mu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei16_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei16_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint16m2_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei16_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei16_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei16_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, 
+ vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint16m1_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei16_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei16_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei16_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei16_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei16_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x2_t test_vluxseg2ei16_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8m2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint16m8_t rs2, size_t vl) { +vint8m4x2_t test_vluxseg2ei16_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i8m4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei16_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei16_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei16_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei16_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, vuint16m2_t rs2, + 
size_t vl) { return __riscv_vluxseg2ei16_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint16m4_t rs2, size_t vl) { +vint16m4x2_t test_vluxseg2ei16_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i16m4x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei16_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei16_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei16_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint16m2_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei16_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei16_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei16_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint16m1_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei16_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei16_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei16_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei16_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + 
vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei16_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x2_t test_vluxseg2ei16_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_u8m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint16m8_t rs2, size_t vl) { +vuint8m4x2_t test_vluxseg2ei16_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, vuint16m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei16_v_u8m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei16_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei16_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei16_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei16_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint16m4x2_t test_vluxseg2ei16_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u16m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei16_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei16_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei16_v_u32m2x2_mu(vbool16_t 
vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei16_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei16_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei16_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei16_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg2ei16_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei32.c index 0d6f8f6d2..0dcebb83f 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei32.c @@ -1,743 +1,1107 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tu(vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16mf4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tu(vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16mf2x2_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tu(vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16m1x2_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tu(vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16m2x2_tu(vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tu(vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint32m8_t
rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16m4x2_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tu(vfloat32mf2x2_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f32mf2x2_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tu(vfloat32m1x2_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_f32m1x2_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tu(vfloat32m2x2_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_f32m2x2_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tu(vfloat32m4x2_t vd, + const float *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_f32m4x2_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tu(vfloat64m1x2_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f64m1x2_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tu(vfloat64m2x2_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f64m2x2_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tu(vfloat64m4x2_t vd, + const double *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f64m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i8mf8x2_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i8mf4x2_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, + vuint32m8_t rs2, size_t vl) { return 
__riscv_vluxseg2ei32_v_i8m2x2_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tu(vint16mf4x2_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16mf4x2_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tu(vint16mf2x2_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16mf2x2_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16m1x2_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16m2x2_tu(vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16m4x2_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tu(vint32mf2x2_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32m2x2_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i64m1x2_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i64m2x2_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i64m4x2_tu(vd, rs1, rs2, vl); } 
-vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tu(vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tu(vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tu(vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8m2x2_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tu(vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16mf4x2_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tu(vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16mf2x2_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tu(vuint16m1x2_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16m1x2_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tu(vuint16m2x2_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16m2x2_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tu(vuint16m4x2_t vd, + const uint16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16m4x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tu(vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tu(vuint32m1x2_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32m1x2_tu(vd, rs1, rs2, vl); } 
-vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tu(vuint32m2x2_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tu(vuint32m4x2_t vd, + const uint32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tu(vuint64m1x2_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tu(vuint64m2x2_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tu(vuint64m4x2_t vd, + const uint64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tum(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tum(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tum(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t 
test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i8m2x2_tum(vm, vd, rs1, 
rs2, vl); } -vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16m2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16m4x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32m1x2_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return 
__riscv_vluxseg2ei32_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_u8m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t 
vd, + const uint16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_tumu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_tumu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t vm, 
vfloat16m2x2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_tumu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return 
__riscv_vluxseg2ei32_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, + const 
int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_tumu(vbool64_t vm, + vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { 
+vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_tumu(vbool32_t vm, + vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_tumu(vbool64_t vm, + vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); 
} -vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei32_v_f16mf4x2_mu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei32_v_f16mf2x2_mu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei32_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei32_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint32m8_t rs2, size_t vl) { +vfloat16m4x2_t test_vluxseg2ei32_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f16m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei32_v_f32mf2x2_mu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei32_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei32_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint32m4_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei32_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei32_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei32_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + 
vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint32m2_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei32_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei32_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei32_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei32_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei32_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x2_t test_vluxseg2ei32_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i8m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei32_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei32_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei32_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei32_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint32m8_t rs2, size_t vl) { +vint16m4x2_t test_vluxseg2ei32_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, vuint32m8_t rs2, + 
size_t vl) { return __riscv_vluxseg2ei32_v_i16m4x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei32_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei32_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei32_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint32m4_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei32_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei32_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei32_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint32m2_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei32_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei32_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei32_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei32_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei32_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint32m4_t 
rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x2_t test_vluxseg2ei32_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei32_v_u8m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei32_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei32_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei32_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei32_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint16m4x2_t test_vluxseg2ei32_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u16m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei32_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei32_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei32_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei32_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x2_t 
test_vluxseg2ei32_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei32_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei32_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg2ei32_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei64.c index f2f86775c..e02cb3778 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei64.c @@ -1,663 +1,990 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tu(vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16mf4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tu(vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16mf2x2_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tu(vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16m1x2_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tu(vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16m2x2_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tu(vfloat32mf2x2_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f32mf2x2_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tu(vfloat32m1x2_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_f32m1x2_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tu(vfloat32m2x2_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return
__riscv_vluxseg2ei64_v_f32m2x2_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tu(vfloat32m4x2_t vd, + const float *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_f32m4x2_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tu(vfloat64m1x2_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f64m1x2_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tu(vfloat64m2x2_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f64m2x2_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tu(vfloat64m4x2_t vd, + const double *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f64m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i8mf8x2_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i8mf4x2_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tu(vint16mf4x2_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16mf4x2_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tu(vint16mf2x2_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16mf2x2_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16m1x2_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16m2x2_tu(vd, rs1, rs2, vl); } 
-vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tu(vint32mf2x2_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32m2x2_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i64m1x2_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i64m2x2_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i64m4x2_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tu(vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tu(vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tu(vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tu(vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16mf4x2_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t 
test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tu(vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16mf2x2_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tu(vuint16m1x2_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16m1x2_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tu(vuint16m2x2_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16m2x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tu(vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tu(vuint32m1x2_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32m1x2_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tu(vuint32m2x2_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tu(vuint32m4x2_t vd, + const uint32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tu(vuint64m1x2_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tu(vuint64m2x2_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tu(vuint64m4x2_t vd, + const uint64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tum(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tum(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + 
vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tum(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x2_t 
test_vluxseg2ei64_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32m1x2_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, 
size_t vl) { +vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t 
test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_tumu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_tumu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, + 
const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_tumu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t 
vl) { +vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t 
vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_tumu(vbool64_t vm, + vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_tumu(vbool32_t vm, + vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_tumu(vbool64_t vm, + vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { 
return __riscv_vluxseg2ei64_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei64_v_f16mf4x2_mu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei64_v_f16mf2x2_mu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei64_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei64_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei64_v_f32mf2x2_mu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { 
+vfloat32m1x2_t test_vluxseg2ei64_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei64_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint64m8_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei64_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei64_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei64_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint64m4_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei64_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei64_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei64_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei64_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei64_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei64_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t 
vl) { +vint16mf2x2_t test_vluxseg2ei64_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei64_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei64_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei64_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei64_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei64_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint64m8_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei64_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei64_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei64_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint64m4_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei64_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei64_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t 
vl) { +vuint8mf4x2_t test_vluxseg2ei64_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei64_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei64_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg2ei64_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei64_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei64_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei64_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei64_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei64_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei64_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei64_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei64_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t vm, 
vuint64m1x2_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei64_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei64_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei64_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg2ei64_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei8.c index cfe68148e..18181c3df 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg2ei8.c @@ -1,775 +1,1147 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tu(vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16mf4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tu(vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16mf2x2_tu(vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tu(vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16m1x2_tu(vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tu(vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16m2x2_tu(vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tu(vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16m4x2_tu(vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tu(vfloat32mf2x2_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f32mf2x2_tu(vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tu(vfloat32m1x2_t vd, +
const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f32m1x2_tu(vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tu(vfloat32m2x2_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f32m2x2_tu(vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tu(vfloat32m4x2_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f32m4x2_tu(vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tu(vfloat64m1x2_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f64m1x2_tu(vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tu(vfloat64m2x2_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f64m2x2_tu(vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tu(vfloat64m4x2_t vd, + const double *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f64m4x2_tu(vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tu(vint8mf8x2_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i8mf8x2_tu(vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tu(vint8mf4x2_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i8mf4x2_tu(vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tu(vint8mf2x2_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i8mf2x2_tu(vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tu(vint8m1x2_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i8m1x2_tu(vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tu(vint8m2x2_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i8m2x2_tu(vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tu(vint8m4x2_t vd, const int8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i8m4x2_tu(vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tu(vint16mf4x2_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i16mf4x2_tu(vd, rs1, rs2, vl); } -vint16mf2x2_t 
test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tu(vint16mf2x2_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i16mf2x2_tu(vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tu(vint16m1x2_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i16m1x2_tu(vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tu(vint16m2x2_t vd, const int16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i16m2x2_tu(vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tu(vint16m4x2_t vd, const int16_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i16m4x2_tu(vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tu(vint32mf2x2_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i32mf2x2_tu(vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tu(vint32m1x2_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i32m1x2_tu(vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tu(vint32m2x2_t vd, const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i32m2x2_tu(vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tu(vint32m4x2_t vd, const int32_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i32m4x2_tu(vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tu(vint64m1x2_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i64m1x2_tu(vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tu(vint64m2x2_t vd, const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i64m2x2_tu(vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tu(vint64m4x2_t vd, const int64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i64m4x2_tu(vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tu(vuint8mf8x2_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8mf8x2_tu(vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x2_t 
test_vluxseg2ei8_v_u8mf4x2_tu(vuint8mf4x2_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8mf4x2_tu(vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tu(vuint8mf2x2_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8mf2x2_tu(vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tu(vuint8m1x2_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u8m1x2_tu(vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tu(vuint8m2x2_t vd, const uint8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u8m2x2_tu(vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tu(vuint8m4x2_t vd, const uint8_t *rs1, + vuint8m4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u8m4x2_tu(vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tu(vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16mf4x2_tu(vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tu(vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16mf2x2_tu(vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tu(vuint16m1x2_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16m1x2_tu(vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tu(vuint16m2x2_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u16m2x2_tu(vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tu(vuint16m4x2_t vd, + const uint16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u16m4x2_tu(vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tu(vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32mf2x2_tu(vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tu(vuint32m1x2_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32m1x2_tu(vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tu(vuint32m2x2_t vd, + const 
uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32m2x2_tu(vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tu(vuint32m4x2_t vd, + const uint32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u32m4x2_tu(vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tu(vuint64m1x2_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u64m1x2_tu(vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tu(vuint64m2x2_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u64m2x2_tu(vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tu(vuint64m4x2_t vd, + const uint64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u64m4x2_tu(vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tum(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tum(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tum(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tum(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tum(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tum(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tum(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return 
__riscv_vluxseg2ei8_v_f32m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tum(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f32m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tum(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f32m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tum(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f64m1x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tum(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f64m2x2_tum(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tum(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f64m4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tum(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tum(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tum(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tum(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8m1x2_tum(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tum(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8m2x2_tum(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tum(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, vuint8m4_t rs2, + size_t vl) { return 
__riscv_vluxseg2ei8_v_i8m4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tum(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tum(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tum(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i16m1x2_tum(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tum(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i16m2x2_tum(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tum(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i16m4x2_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tum(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tum(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i32m1x2_tum(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tum(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i32m2x2_tum(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tum(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i32m4x2_tum(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tum(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i64m1x2_tum(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tum(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return 
__riscv_vluxseg2ei8_v_i64m2x2_tum(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tum(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i64m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tum(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u8mf8x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tum(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u8mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tum(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u8mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tum(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tum(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tum(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tum(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16mf4x2_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tum(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tum(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tum(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint8m1_t rs2, size_t 
vl) { return __riscv_vluxseg2ei8_v_u16m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tum(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tum(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32mf2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tum(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tum(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tum(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32m4x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tum(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u64m1x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tum(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u64m2x2_tum(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tum(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u64m4x2_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_tumu(vbool64_t vm, + vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_tumu(vbool32_t vm, + vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x2_t 
test_vluxseg2ei8_v_f16m1x2_tumu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_tumu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_tumu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_tumu(vbool64_t vm, + vfloat32mf2x2_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_tumu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_tumu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_tumu(vbool8_t vm, vfloat32m4x2_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_tumu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_tumu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_tumu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_tumu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t vm, 
vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_tumu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_tumu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_tumu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_tumu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_tumu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_tumu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_tumu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_tumu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_tumu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_tumu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_tumu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t vm, 
vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_tumu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_tumu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_tumu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_tumu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_tumu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_tumu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_tumu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u8mf8x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_tumu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u8mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_tumu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u8mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_tumu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_tumu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t 
test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_tumu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_tumu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16mf4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_tumu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_tumu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_tumu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_tumu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_tumu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32mf2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_tumu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_tumu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_tumu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32m4x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_tumu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) 
{ return __riscv_vluxseg2ei8_v_u64m1x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_tumu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u64m2x2_tumu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_tumu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u64m4x2_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x2_t test_vluxseg2ei8_v_f16mf4x2_mu(vbool64_t vm, vfloat16mf4x2_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x2_t test_vluxseg2ei8_v_f16mf2x2_mu(vbool32_t vm, vfloat16mf2x2_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x2_t test_vluxseg2ei8_v_f16m1x2_mu(vbool16_t vm, vfloat16m1x2_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x2_t test_vluxseg2ei8_v_f16m2x2_mu(vbool8_t vm, vfloat16m2x2_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, const _Float16 *rs1, vuint8m2_t rs2, size_t vl) { +vfloat16m4x2_t test_vluxseg2ei8_v_f16m4x2_mu(vbool4_t vm, vfloat16m4x2_t vd, + const _Float16 *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f16m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x2_t test_vluxseg2ei8_v_f32mf2x2_mu(vbool64_t vm, vfloat32mf2x2_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_f32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x2_t test_vluxseg2ei8_v_f32m1x2_mu(vbool32_t vm, vfloat32m1x2_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f32m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x2_t test_vluxseg2ei8_v_f32m2x2_mu(vbool16_t vm, vfloat32m2x2_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f32m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t vm, vfloat32m4x2_t vd, const float *rs1, vuint8m1_t rs2, size_t vl) { +vfloat32m4x2_t test_vluxseg2ei8_v_f32m4x2_mu(vbool8_t vm, 
vfloat32m4x2_t vd, + const float *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f32m4x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x2_t test_vluxseg2ei8_v_f64m1x2_mu(vbool64_t vm, vfloat64m1x2_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f64m1x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x2_t test_vluxseg2ei8_v_f64m2x2_mu(vbool32_t vm, vfloat64m2x2_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f64m2x2_mu(vm, vd, rs1, rs2, vl); } -vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, const double *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat64m4x2_t test_vluxseg2ei8_v_f64m4x2_mu(vbool16_t vm, vfloat64m4x2_t vd, + const double *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_f64m4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x2_t test_vluxseg2ei8_v_i8mf8x2_mu(vbool64_t vm, vint8mf8x2_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x2_t test_vluxseg2ei8_v_i8mf4x2_mu(vbool32_t vm, vint8mf4x2_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x2_t test_vluxseg2ei8_v_i8mf2x2_mu(vbool16_t vm, vint8mf2x2_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x2_t test_vluxseg2ei8_v_i8m1x2_mu(vbool8_t vm, vint8m1x2_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8m1x2_mu(vm, vd, rs1, rs2, vl); } -vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x2_t test_vluxseg2ei8_v_i8m2x2_mu(vbool4_t vm, vint8m2x2_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8m2x2_mu(vm, vd, rs1, rs2, vl); } -vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, const int8_t *rs1, vuint8m4_t rs2, size_t vl) { +vint8m4x2_t test_vluxseg2ei8_v_i8m4x2_mu(vbool2_t vm, vint8m4x2_t vd, + const int8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i8m4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x2_t test_vluxseg2ei8_v_i16mf4x2_mu(vbool64_t vm, vint16mf4x2_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x2_t test_vluxseg2ei8_v_i16mf2x2_mu(vbool32_t vm, vint16mf2x2_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, 
size_t vl) { return __riscv_vluxseg2ei8_v_i16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x2_t test_vluxseg2ei8_v_i16m1x2_mu(vbool16_t vm, vint16m1x2_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i16m1x2_mu(vm, vd, rs1, rs2, vl); } -vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x2_t test_vluxseg2ei8_v_i16m2x2_mu(vbool8_t vm, vint16m2x2_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i16m2x2_mu(vm, vd, rs1, rs2, vl); } -vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, const int16_t *rs1, vuint8m2_t rs2, size_t vl) { +vint16m4x2_t test_vluxseg2ei8_v_i16m4x2_mu(vbool4_t vm, vint16m4x2_t vd, + const int16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i16m4x2_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x2_t test_vluxseg2ei8_v_i32mf2x2_mu(vbool64_t vm, vint32mf2x2_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_i32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x2_t test_vluxseg2ei8_v_i32m1x2_mu(vbool32_t vm, vint32m1x2_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i32m1x2_mu(vm, vd, rs1, rs2, vl); } -vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x2_t test_vluxseg2ei8_v_i32m2x2_mu(vbool16_t vm, vint32m2x2_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i32m2x2_mu(vm, vd, rs1, rs2, vl); } -vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, const int32_t *rs1, vuint8m1_t rs2, size_t vl) { +vint32m4x2_t test_vluxseg2ei8_v_i32m4x2_mu(vbool8_t vm, vint32m4x2_t vd, + const int32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i32m4x2_mu(vm, vd, rs1, rs2, vl); } -vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x2_t test_vluxseg2ei8_v_i64m1x2_mu(vbool64_t vm, vint64m1x2_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i64m1x2_mu(vm, vd, rs1, rs2, vl); } -vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x2_t test_vluxseg2ei8_v_i64m2x2_mu(vbool32_t vm, vint64m2x2_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i64m2x2_mu(vm, vd, rs1, rs2, vl); } -vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, const int64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint64m4x2_t test_vluxseg2ei8_v_i64m4x2_mu(vbool16_t vm, vint64m4x2_t vd, + const int64_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_i64m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x2_t test_vluxseg2ei8_v_u8mf8x2_mu(vbool64_t vm, vuint8mf8x2_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return 
__riscv_vluxseg2ei8_v_u8mf8x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x2_t test_vluxseg2ei8_v_u8mf4x2_mu(vbool32_t vm, vuint8mf4x2_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x2_t test_vluxseg2ei8_v_u8mf2x2_mu(vbool16_t vm, vuint8mf2x2_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x2_t test_vluxseg2ei8_v_u8m1x2_mu(vbool8_t vm, vuint8m1x2_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x2_t test_vluxseg2ei8_v_u8m2x2_mu(vbool4_t vm, vuint8m2x2_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, const uint8_t *rs1, vuint8m4_t rs2, size_t vl) { +vuint8m4x2_t test_vluxseg2ei8_v_u8m4x2_mu(vbool2_t vm, vuint8m4x2_t vd, + const uint8_t *rs1, vuint8m4_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u8m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x2_t test_vluxseg2ei8_v_u16mf4x2_mu(vbool64_t vm, vuint16mf4x2_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16mf4x2_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x2_t test_vluxseg2ei8_v_u16mf2x2_mu(vbool32_t vm, vuint16mf2x2_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x2_t test_vluxseg2ei8_v_u16m1x2_mu(vbool16_t vm, vuint16m1x2_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u16m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x2_t test_vluxseg2ei8_v_u16m2x2_mu(vbool8_t vm, vuint16m2x2_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u16m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, const uint16_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint16m4x2_t test_vluxseg2ei8_v_u16m4x2_mu(vbool4_t vm, vuint16m4x2_t vd, + const uint16_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u16m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x2_t test_vluxseg2ei8_v_u32mf2x2_mu(vbool64_t vm, vuint32mf2x2_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return 
__riscv_vluxseg2ei8_v_u32mf2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x2_t test_vluxseg2ei8_v_u32m1x2_mu(vbool32_t vm, vuint32m1x2_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x2_t test_vluxseg2ei8_v_u32m2x2_mu(vbool16_t vm, vuint32m2x2_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u32m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, const uint32_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint32m4x2_t test_vluxseg2ei8_v_u32m4x2_mu(vbool8_t vm, vuint32m4x2_t vd, + const uint32_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg2ei8_v_u32m4x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x2_t test_vluxseg2ei8_v_u64m1x2_mu(vbool64_t vm, vuint64m1x2_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u64m1x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x2_t test_vluxseg2ei8_v_u64m2x2_mu(vbool32_t vm, vuint64m2x2_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u64m2x2_mu(vm, vd, rs1, rs2, vl); } -vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, const uint64_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint64m4x2_t test_vluxseg2ei8_v_u64m4x2_mu(vbool16_t vm, vuint64m4x2_t vd, + const uint64_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg2ei8_v_u64m4x2_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei16.c index b263fa23c..8614a187d 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei16.c @@ -1,599 +1,894 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tu(vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16mf4x3_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tu(vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16mf2x3_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tu(vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return
__riscv_vluxseg3ei16_v_f16m1x3_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tu(vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tu(vfloat32mf2x3_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tu(vfloat32m1x3_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tu(vfloat32m2x3_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tu(vfloat64m1x3_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f64m1x3_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tu(vfloat64m2x3_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f64m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i8mf8x3_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i8mf4x3_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i8mf2x3_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i8m1x3_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i8m2x3_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tu(vint16mf4x3_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16mf4x3_tu(vd, rs1, 
rs2, vl); } -vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tu(vint16mf2x3_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16m1x3_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16m2x3_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tu(vint32mf2x3_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i64m1x3_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i64m2x3_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tu(vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8mf8x3_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tu(vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8mf4x3_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tu(vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8mf2x3_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8m1x3_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t 
test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8m2x3_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tu(vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16mf4x3_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tu(vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16mf2x3_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tu(vuint16m1x3_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16m1x3_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tu(vuint16m2x3_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16m2x3_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tu(vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u32mf2x3_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tu(vuint32m1x3_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u32m1x3_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tu(vuint32m2x3_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u32m2x3_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tu(vuint64m1x3_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u64m1x3_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tu(vuint64m2x3_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u64m2x3_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tum(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tum(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 
*rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tum(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f32m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f32m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f64m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f64m2x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x3_t 
test_vluxseg3ei16_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i8m1x3_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i8m2x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16m2x3_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i32m1x3_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i32m2x3_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i64m1x3_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i64m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, 
vuint16mf4_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_u8m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u32m1x3_tum(vm, vd, rs1, rs2, vl); } 
-vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u32m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u64m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u64m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_tumu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_tumu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_tumu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x3_t 
test_vluxseg3ei16_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const 
int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_tumu(vbool64_t vm, + vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16mf4x3_tumu(vm, vd, rs1, rs2, 
vl); } -vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_tumu(vbool32_t vm, + vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_tumu(vbool64_t vm, + vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei16_v_f16mf4x3_mu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei16_v_f16mf2x3_mu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x3_t 
test_vluxseg3ei16_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei16_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f16m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei16_v_f32mf2x3_mu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei16_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f32m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei16_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_f32m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei16_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f64m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei16_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_f64m2x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei16_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei16_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei16_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei16_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i8m1x3_mu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint16m4_t rs2, 
size_t vl) { +vint8m2x3_t test_vluxseg3ei16_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i8m2x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei16_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei16_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei16_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i16m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei16_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i16m2x3_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei16_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei16_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i32m1x3_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei16_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_i32m2x3_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei16_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i64m1x3_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei16_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_i64m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei16_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t 
*rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei16_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei16_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei16_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_u8m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x3_t test_vluxseg3ei16_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei16_v_u8m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei16_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei16_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei16_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei16_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u16m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei16_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei16_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u32m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei16_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u32m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t 
test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei16_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u64m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei16_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei16_v_u64m2x3_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei32.c index 6a661a4fc..b6ecc6fb7 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei32.c @@ -1,599 +1,894 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tu(vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16mf4x3_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tu(vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16mf2x3_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tu(vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16m1x3_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tu(vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tu(vfloat32mf2x3_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tu(vfloat32m1x3_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tu(vfloat32m2x3_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x3_t
test_vluxseg3ei32_v_f64m1x3_tu(vfloat64m1x3_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f64m1x3_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tu(vfloat64m2x3_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f64m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i8mf8x3_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i8mf4x3_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i8mf2x3_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i8m1x3_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i8m2x3_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tu(vint16mf4x3_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16mf4x3_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tu(vint16mf2x3_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16m1x3_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16m2x3_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tu(vint32mf2x3_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, + 
vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i64m1x3_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i64m2x3_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tu(vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8mf8x3_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tu(vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8mf4x3_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tu(vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8mf2x3_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8m1x3_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8m2x3_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tu(vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16mf4x3_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tu(vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16mf2x3_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tu(vuint16m1x3_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16m1x3_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tu(vuint16m2x3_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { 
return __riscv_vluxseg3ei32_v_u16m2x3_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tu(vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u32mf2x3_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tu(vuint32m1x3_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u32m1x3_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tu(vuint32m2x3_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u32m2x3_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tu(vuint64m1x3_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u64m1x3_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tu(vuint64m2x3_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u64m2x3_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tum(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tum(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tum(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return 
__riscv_vluxseg3ei32_v_f32m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f32m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f64m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f64m2x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i8m1x3_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i8m2x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + 
vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16m2x3_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i32m1x3_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i32m2x3_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i64m1x3_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i64m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tum(vbool4_t vm, 
vuint8m2x3_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_u8m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u32m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u32m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u64m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u64m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_tumu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t vm, 
vfloat16mf2x3_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_tumu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_tumu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return 
__riscv_vluxseg3ei32_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, + 
const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_tumu(vbool64_t vm, + vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_tumu(vbool32_t vm, + vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_tumu(vbool64_t vm, + vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const 
uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei32_v_f16mf4x3_mu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei32_v_f16mf2x3_mu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei32_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei32_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f16m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei32_v_f32mf2x3_mu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei32_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_f32m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei32_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return 
__riscv_vluxseg3ei32_v_f32m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei32_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f64m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei32_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_f64m2x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei32_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei32_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei32_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei32_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i8m1x3_mu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x3_t test_vluxseg3ei32_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i8m2x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei32_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei32_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei32_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i16m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei32_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, vuint32m4_t rs2, + size_t vl) { return 
__riscv_vluxseg3ei32_v_i16m2x3_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei32_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei32_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i32m1x3_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei32_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i32m2x3_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei32_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_i64m1x3_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei32_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_i64m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei32_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei32_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei32_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei32_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_u8m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x3_t test_vluxseg3ei32_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg3ei32_v_u8m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei32_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, 
size_t vl) { return __riscv_vluxseg3ei32_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei32_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei32_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei32_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u16m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei32_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei32_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u32m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei32_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u32m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei32_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u64m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei32_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg3ei32_v_u64m2x3_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei64.c index e54d64b64..2f594f2b6 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei64.c @@ -1,567 +1,848 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x3_t
test_vluxseg3ei64_v_f16mf4x3_tu(vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16mf4x3_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tu(vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16mf2x3_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tu(vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16m1x3_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tu(vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tu(vfloat32mf2x3_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tu(vfloat32m1x3_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tu(vfloat32m2x3_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tu(vfloat64m1x3_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f64m1x3_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tu(vfloat64m2x3_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f64m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i8mf8x3_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i8mf4x3_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i8mf2x3_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x3_t 
test_vluxseg3ei64_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i8m1x3_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tu(vint16mf4x3_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16mf4x3_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tu(vint16mf2x3_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16m1x3_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16m2x3_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tu(vint32mf2x3_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i64m1x3_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i64m2x3_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tu(vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8mf8x3_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tu(vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8mf4x3_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tu(vuint8mf2x3_t vd, + const 
uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8mf2x3_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8m1x3_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tu(vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16mf4x3_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tu(vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16mf2x3_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tu(vuint16m1x3_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16m1x3_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tu(vuint16m2x3_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16m2x3_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tu(vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u32mf2x3_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tu(vuint32m1x3_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u32m1x3_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tu(vuint32m2x3_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u32m2x3_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tu(vuint64m1x3_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u64m1x3_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tu(vuint64m2x3_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u64m2x3_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tum(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) 
{ +vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tum(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tum(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f32m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f32m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f64m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f64m2x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t 
test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i8m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16m2x3_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i32m1x3_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i32m2x3_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i64m1x3_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i64m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8mf8x3_tum(vm, 
vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u32m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u32m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, + const 
uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u64m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u64m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_tumu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_tumu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_tumu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t vm, 
vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl); } 
-vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_tumu(vbool64_t vm, + vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_tumu(vbool32_t vm, + vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_tumu(vbool64_t vm, + vuint32mf2x3_t vd, + const 
uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei64_v_f16mf4x3_mu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei64_v_f16mf2x3_mu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei64_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei64_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f16m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei64_v_f32mf2x3_mu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei64_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_f32m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, 
vuint64m4_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei64_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_f32m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei64_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f64m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei64_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_f64m2x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei64_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei64_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei64_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei64_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i8m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei64_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei64_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei64_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i16m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei64_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i16m2x3_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, 
vuint64m1_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei64_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei64_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i32m1x3_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei64_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i32m2x3_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei64_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i64m1x3_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei64_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_i64m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei64_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei64_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei64_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei64_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg3ei64_v_u8m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei64_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei64_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t vm, 
vuint16m1x3_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei64_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei64_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u16m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei64_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei64_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u32m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei64_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u32m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei64_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u64m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei64_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg3ei64_v_u64m2x3_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei8.c index b0ba569bb..1deb31ad0 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg3ei8.c @@ -1,599 +1,888 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tu(vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16mf4x3_tu(vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tu(vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16mf2x3_tu(vd, rs1, rs2, vl); } -vfloat16m1x3_t
test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tu(vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16m1x3_tu(vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tu(vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16m2x3_tu(vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tu(vfloat32mf2x3_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f32mf2x3_tu(vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tu(vfloat32m1x3_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_f32m1x3_tu(vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tu(vfloat32m2x3_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_f32m2x3_tu(vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tu(vfloat64m1x3_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_f64m1x3_tu(vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tu(vfloat64m2x3_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_f64m2x3_tu(vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tu(vint8mf8x3_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i8mf8x3_tu(vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tu(vint8mf4x3_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i8mf4x3_tu(vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tu(vint8mf2x3_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i8mf2x3_tu(vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tu(vint8m1x3_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i8m1x3_tu(vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tu(vint8m2x3_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i8m2x3_tu(vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { 
+vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tu(vint16mf4x3_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i16mf4x3_tu(vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tu(vint16mf2x3_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i16mf2x3_tu(vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tu(vint16m1x3_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i16m1x3_tu(vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tu(vint16m2x3_t vd, const int16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i16m2x3_tu(vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tu(vint32mf2x3_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i32mf2x3_tu(vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tu(vint32m1x3_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i32m1x3_tu(vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tu(vint32m2x3_t vd, const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i32m2x3_tu(vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tu(vint64m1x3_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i64m1x3_tu(vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tu(vint64m2x3_t vd, const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i64m2x3_tu(vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tu(vuint8mf8x3_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u8mf8x3_tu(vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tu(vuint8mf4x3_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u8mf4x3_tu(vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tu(vuint8mf2x3_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u8mf2x3_tu(vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tu(vuint8m1x3_t vd, const uint8_t *rs1, + vuint8m1_t 
rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u8m1x3_tu(vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tu(vuint8m2x3_t vd, const uint8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u8m2x3_tu(vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tu(vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16mf4x3_tu(vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tu(vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16mf2x3_tu(vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tu(vuint16m1x3_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16m1x3_tu(vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tu(vuint16m2x3_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u16m2x3_tu(vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tu(vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u32mf2x3_tu(vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tu(vuint32m1x3_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u32m1x3_tu(vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tu(vuint32m2x3_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u32m2x3_tu(vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tu(vuint64m1x3_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u64m1x3_tu(vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tu(vuint64m2x3_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u64m2x3_tu(vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tum(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tum(vbool32_t vm, + 
vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tum(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tum(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tum(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tum(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_f32m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tum(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_f32m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tum(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f64m1x3_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tum(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f64m2x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tum(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tum(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tum(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x3_t 
test_vluxseg3ei8_v_i8m1x3_tum(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8m1x3_tum(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tum(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8m2x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tum(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tum(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tum(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i16m1x3_tum(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tum(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i16m2x3_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tum(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tum(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i32m1x3_tum(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tum(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i32m2x3_tum(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tum(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i64m1x3_tum(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tum(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i64m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x3_t 
test_vluxseg3ei8_v_u8mf8x3_tum(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u8mf8x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tum(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u8mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tum(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u8mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tum(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u8m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tum(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u8m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tum(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16mf4x3_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tum(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tum(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tum(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tum(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u32mf2x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tum(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u32m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, 
vuint8mf2_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tum(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u32m2x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tum(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u64m1x3_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tum(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u64m2x3_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_tumu(vbool64_t vm, + vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_tumu(vbool32_t vm, + vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_tumu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_tumu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_tumu(vbool64_t vm, + vfloat32mf2x3_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_tumu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_tumu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_tumu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f64m1x3_tumu(vm, vd, rs1, 
rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_tumu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_tumu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_tumu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_tumu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_tumu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_tumu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_tumu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_tumu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_tumu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_tumu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_tumu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return 
__riscv_vluxseg3ei8_v_i32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_tumu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_tumu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_tumu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_tumu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_tumu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u8mf8x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_tumu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u8mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_tumu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u8mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_tumu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u8m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_tumu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u8m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_tumu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16mf4x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_tumu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t 
*rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_tumu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_tumu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_tumu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u32mf2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_tumu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u32m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_tumu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u32m2x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_tumu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u64m1x3_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_tumu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u64m2x3_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x3_t test_vluxseg3ei8_v_f16mf4x3_mu(vbool64_t vm, vfloat16mf4x3_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x3_t test_vluxseg3ei8_v_f16mf2x3_mu(vbool32_t vm, vfloat16mf2x3_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x3_t test_vluxseg3ei8_v_f16m1x3_mu(vbool16_t vm, vfloat16m1x3_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { 
+vfloat16m2x3_t test_vluxseg3ei8_v_f16m2x3_mu(vbool8_t vm, vfloat16m2x3_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f16m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x3_t test_vluxseg3ei8_v_f32mf2x3_mu(vbool64_t vm, vfloat32mf2x3_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_f32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x3_t test_vluxseg3ei8_v_f32m1x3_mu(vbool32_t vm, vfloat32m1x3_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_f32m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x3_t test_vluxseg3ei8_v_f32m2x3_mu(vbool16_t vm, vfloat32m2x3_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_f32m2x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x3_t test_vluxseg3ei8_v_f64m1x3_mu(vbool64_t vm, vfloat64m1x3_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_f64m1x3_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x3_t test_vluxseg3ei8_v_f64m2x3_mu(vbool32_t vm, vfloat64m2x3_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_f64m2x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x3_t test_vluxseg3ei8_v_i8mf8x3_mu(vbool64_t vm, vint8mf8x3_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x3_t test_vluxseg3ei8_v_i8mf4x3_mu(vbool32_t vm, vint8mf4x3_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x3_t test_vluxseg3ei8_v_i8mf2x3_mu(vbool16_t vm, vint8mf2x3_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x3_t test_vluxseg3ei8_v_i8m1x3_mu(vbool8_t vm, vint8m1x3_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8m1x3_mu(vm, vd, rs1, rs2, vl); } -vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x3_t test_vluxseg3ei8_v_i8m2x3_mu(vbool4_t vm, vint8m2x3_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i8m2x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x3_t test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x3_t 
test_vluxseg3ei8_v_i16mf4x3_mu(vbool64_t vm, vint16mf4x3_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x3_t test_vluxseg3ei8_v_i16mf2x3_mu(vbool32_t vm, vint16mf2x3_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x3_t test_vluxseg3ei8_v_i16m1x3_mu(vbool16_t vm, vint16m1x3_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i16m1x3_mu(vm, vd, rs1, rs2, vl); } -vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x3_t test_vluxseg3ei8_v_i16m2x3_mu(vbool8_t vm, vint16m2x3_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i16m2x3_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x3_t test_vluxseg3ei8_v_i32mf2x3_mu(vbool64_t vm, vint32mf2x3_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_i32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x3_t test_vluxseg3ei8_v_i32m1x3_mu(vbool32_t vm, vint32m1x3_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i32m1x3_mu(vm, vd, rs1, rs2, vl); } -vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x3_t test_vluxseg3ei8_v_i32m2x3_mu(vbool16_t vm, vint32m2x3_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i32m2x3_mu(vm, vd, rs1, rs2, vl); } -vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x3_t test_vluxseg3ei8_v_i64m1x3_mu(vbool64_t vm, vint64m1x3_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i64m1x3_mu(vm, vd, rs1, rs2, vl); } -vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x3_t test_vluxseg3ei8_v_i64m2x3_mu(vbool32_t vm, vint64m2x3_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_i64m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x3_t test_vluxseg3ei8_v_u8mf8x3_mu(vbool64_t vm, vuint8mf8x3_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u8mf8x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x3_t test_vluxseg3ei8_v_u8mf4x3_mu(vbool32_t vm, vuint8mf4x3_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u8mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x3_t test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x3_t 
test_vluxseg3ei8_v_u8mf2x3_mu(vbool16_t vm, vuint8mf2x3_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u8mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x3_t test_vluxseg3ei8_v_u8m1x3_mu(vbool8_t vm, vuint8m1x3_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u8m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x3_t test_vluxseg3ei8_v_u8m2x3_mu(vbool4_t vm, vuint8m2x3_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u8m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x3_t test_vluxseg3ei8_v_u16mf4x3_mu(vbool64_t vm, vuint16mf4x3_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16mf4x3_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x3_t test_vluxseg3ei8_v_u16mf2x3_mu(vbool32_t vm, vuint16mf2x3_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x3_t test_vluxseg3ei8_v_u16m1x3_mu(vbool16_t vm, vuint16m1x3_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u16m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x3_t test_vluxseg3ei8_v_u16m2x3_mu(vbool8_t vm, vuint16m2x3_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg3ei8_v_u16m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x3_t test_vluxseg3ei8_v_u32mf2x3_mu(vbool64_t vm, vuint32mf2x3_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u32mf2x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x3_t test_vluxseg3ei8_v_u32m1x3_mu(vbool32_t vm, vuint32m1x3_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u32m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x3_t test_vluxseg3ei8_v_u32m2x3_mu(vbool16_t vm, vuint32m2x3_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u32m2x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x3_t test_vluxseg3ei8_v_u64m1x3_mu(vbool64_t vm, vuint64m1x3_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u64m1x3_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { 
+vuint64m2x3_t test_vluxseg3ei8_v_u64m2x3_mu(vbool32_t vm, vuint64m2x3_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg3ei8_v_u64m2x3_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei16.c index e4ae644df..f6bbc067b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei16.c @@ -1,599 +1,894 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tu(vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16mf4x4_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tu(vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16mf2x4_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tu(vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16m1x4_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tu(vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16m2x4_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tu(vfloat32mf2x4_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f32mf2x4_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tu(vfloat32m1x4_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f32m1x4_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tu(vfloat32m2x4_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_f32m2x4_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tu(vfloat64m1x4_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f64m1x4_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tu(vfloat64m2x4_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f64m2x4_tu(vd, rs1, rs2,
vl); } -vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i8mf8x4_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i8mf4x4_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i8mf2x4_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i8m1x4_tu(vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i8m2x4_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tu(vint16mf4x4_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16mf4x4_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tu(vint16mf2x4_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16mf2x4_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16m1x4_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16m2x4_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tu(vint32mf2x4_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i32mf2x4_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i32m1x4_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i32m2x4_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t 
*rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i64m1x4_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i64m2x4_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tu(vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8mf8x4_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tu(vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8mf4x4_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tu(vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8mf2x4_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8m1x4_tu(vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8m2x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tu(vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tu(vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tu(vuint16m1x4_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tu(vuint16m2x4_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tu(vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u32mf2x4_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, 
vuint16mf2_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tu(vuint32m1x4_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u32m1x4_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tu(vuint32m2x4_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u32m2x4_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tu(vuint64m1x4_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u64m1x4_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tu(vuint64m2x4_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u64m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tum(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tum(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tum(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f32m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f32m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const 
double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f64m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f64m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i8m1x4_tum(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i8m2x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t 
vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_u8m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } 
-vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_tumu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_tumu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x4_t 
test_vluxseg4ei16_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_tumu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t 
test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { 
return __riscv_vluxseg4ei16_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, + vuint16m4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_tumu(vbool64_t vm, + vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_tumu(vbool32_t vm, + vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_tumu(vbool64_t vm, + vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { 
+vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei16_v_f16mf4x4_mu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei16_v_f16mf2x4_mu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei16_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint16m2_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei16_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei16_v_f32mf2x4_mu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei16_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint16m1_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei16_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei16_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f64m1x4_mu(vm, vd, rs1, rs2, vl); } 
-vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei16_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_f64m2x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei16_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei16_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei16_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei16_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint16m4_t rs2, size_t vl) { +vint8m2x4_t test_vluxseg4ei16_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i8m2x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei16_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei16_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei16_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint16m2_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei16_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei16_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl); } 
-vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei16_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i32m1x4_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint16m1_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei16_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_i32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei16_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i64m1x4_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei16_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_i64m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei16_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei16_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei16_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei16_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_u8m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint16m4_t rs2, size_t vl) { +vuint8m2x4_t test_vluxseg4ei16_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, vuint16m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei16_v_u8m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei16_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei16_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return 
__riscv_vluxseg4ei16_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei16_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei16_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u16m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei16_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei16_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u32m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei16_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u32m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei16_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u64m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei16_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei16_v_u64m2x4_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei32.c index 759ea11af..7f194bff7 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei32.c @@ -1,599 +1,894 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tu(vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16mf4x4_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tu(vfloat16mf2x4_t vd, + const _Float16 *rs1, +
vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16mf2x4_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tu(vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16m1x4_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tu(vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16m2x4_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tu(vfloat32mf2x4_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f32mf2x4_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tu(vfloat32m1x4_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_f32m1x4_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tu(vfloat32m2x4_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_f32m2x4_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tu(vfloat64m1x4_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f64m1x4_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tu(vfloat64m2x4_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f64m2x4_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i8mf8x4_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i8mf4x4_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i8mf2x4_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i8m1x4_tu(vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, + vuint32m8_t rs2, size_t vl) { return 
__riscv_vluxseg4ei32_v_i8m2x4_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tu(vint16mf4x4_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16mf4x4_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tu(vint16mf2x4_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16mf2x4_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16m1x4_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16m2x4_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tu(vint32mf2x4_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i32mf2x4_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i32m1x4_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i32m2x4_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i64m1x4_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i64m2x4_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tu(vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8mf8x4_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tu(vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8mf4x4_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tu(vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8mf2x4_tu(vd, rs1, 
rs2, vl); } -vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8m1x4_tu(vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8m2x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tu(vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tu(vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tu(vuint16m1x4_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tu(vuint16m2x4_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tu(vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u32mf2x4_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tu(vuint32m1x4_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u32m1x4_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tu(vuint32m2x4_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u32m2x4_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tu(vuint64m1x4_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u64m1x4_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tu(vuint64m2x4_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u64m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tum(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { 
return __riscv_vluxseg4ei32_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tum(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tum(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f32m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f32m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f64m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f64m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x4_t 
test_vluxseg4ei32_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i8m1x4_tum(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i8m2x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, 
size_t vl) { +vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_u8m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t 
test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_tumu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_tumu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_tumu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_tumu(vbool16_t vm, 
vfloat32m2x4_t vd, + const float *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { 
+vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, + vuint32m8_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t 
vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_tumu(vbool64_t vm, + vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_tumu(vbool32_t vm, + vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_tumu(vbool64_t vm, + vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei32_v_f16mf4x4_mu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei32_v_f16mf2x4_mu(vbool32_t vm, + vfloat16mf2x4_t vd, + const 
_Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei32_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint32m4_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei32_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei32_v_f32mf2x4_mu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei32_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint32m2_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei32_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei32_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f64m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint32m1_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei32_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_f64m2x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei32_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei32_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei32_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x4_t 
test_vluxseg4ei32_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint32m8_t rs2, size_t vl) { +vint8m2x4_t test_vluxseg4ei32_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i8m2x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei32_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei32_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei32_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint32m4_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei32_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei32_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei32_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i32m1x4_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint32m2_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei32_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei32_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_i64m1x4_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint32m1_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei32_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_i64m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x4_t 
test_vluxseg4ei32_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei32_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei32_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei32_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_u8m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint32m8_t rs2, size_t vl) { +vuint8m2x4_t test_vluxseg4ei32_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, vuint32m8_t rs2, + size_t vl) { return __riscv_vluxseg4ei32_v_u8m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei32_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei32_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei32_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei32_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u16m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei32_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei32_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u32m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t 
*rs1, vuint32m2_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei32_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u32m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei32_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u64m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei32_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg4ei32_v_u64m2x4_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei64.c index 548c26d0f..dbdcf8692 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei64.c @@ -1,567 +1,848 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tu(vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16mf4x4_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tu(vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16mf2x4_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tu(vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16m1x4_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tu(vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16m2x4_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tu(vfloat32mf2x4_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f32mf2x4_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tu(vfloat32m1x4_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_f32m1x4_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tu(vfloat32m2x4_t vd, + const float
*rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_f32m2x4_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tu(vfloat64m1x4_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f64m1x4_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tu(vfloat64m2x4_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f64m2x4_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i8mf8x4_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i8mf4x4_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i8mf2x4_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i8m1x4_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tu(vint16mf4x4_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16mf4x4_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tu(vint16mf2x4_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16mf2x4_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16m1x4_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16m2x4_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tu(vint32mf2x4_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i32mf2x4_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return 
__riscv_vluxseg4ei64_v_i32m1x4_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i32m2x4_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i64m1x4_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i64m2x4_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tu(vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8mf8x4_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tu(vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8mf4x4_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tu(vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8mf2x4_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8m1x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tu(vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tu(vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tu(vuint16m1x4_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tu(vuint16m2x4_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tu(vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return 
__riscv_vluxseg4ei64_v_u32mf2x4_tu(vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tu(vuint32m1x4_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u32m1x4_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tu(vuint32m2x4_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u32m2x4_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tu(vuint64m1x4_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u64m1x4_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tu(vuint64m2x4_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u64m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tum(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tum(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tum(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f32m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint64m4_t rs2, size_t vl) { return 
__riscv_vluxseg4ei64_v_f32m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f64m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f64m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i8m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + 
vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x4_t 
test_vluxseg4ei64_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_tumu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_tumu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } 
-vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_tumu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint64m2_t 
rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x4_t 
test_vluxseg4ei64_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_tumu(vbool64_t vm, + vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_tumu(vbool32_t vm, + vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_tumu(vbool64_t vm, + vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei64_v_f16mf4x4_mu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } 
-vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei64_v_f16mf2x4_mu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei64_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint64m8_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei64_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei64_v_f32mf2x4_mu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei64_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint64m4_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei64_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei64_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f64m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint64m2_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei64_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_f64m2x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei64_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei64_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei64_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return 
__riscv_vluxseg4ei64_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei64_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei64_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei64_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei64_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint64m8_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei64_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei64_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei64_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i32m1x4_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint64m4_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei64_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei64_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i64m1x4_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint64m2_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei64_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_i64m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei64_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return 
__riscv_vluxseg4ei64_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei64_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei64_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei64_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg4ei64_v_u8m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei64_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei64_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei64_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei64_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u16m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei64_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei64_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u32m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei64_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u32m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei64_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, + 
const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u64m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei64_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg4ei64_v_u64m2x4_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei8.c index eccb6de56..b917c432b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg4ei8.c @@ -1,599 +1,888 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tu(vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16mf4x4_tu(vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tu(vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16mf2x4_tu(vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tu(vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16m1x4_tu(vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tu(vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16m2x4_tu(vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tu(vfloat32mf2x4_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f32mf2x4_tu(vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tu(vfloat32m1x4_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_f32m1x4_tu(vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tu(vfloat32m2x4_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_f32m2x4_tu(vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tu(vfloat64m1x4_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_f64m1x4_tu(vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t vd, const double
*rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tu(vfloat64m2x4_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_f64m2x4_tu(vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tu(vint8mf8x4_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i8mf8x4_tu(vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tu(vint8mf4x4_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i8mf4x4_tu(vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tu(vint8mf2x4_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i8mf2x4_tu(vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tu(vint8m1x4_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i8m1x4_tu(vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tu(vint8m2x4_t vd, const int8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i8m2x4_tu(vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tu(vint16mf4x4_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i16mf4x4_tu(vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tu(vint16mf2x4_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i16mf2x4_tu(vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tu(vint16m1x4_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i16m1x4_tu(vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tu(vint16m2x4_t vd, const int16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i16m2x4_tu(vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tu(vint32mf2x4_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i32mf2x4_tu(vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tu(vint32m1x4_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i32m1x4_tu(vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tu(vint32m2x4_t vd, const int32_t *rs1, + vuint8mf2_t 
rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i32m2x4_tu(vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tu(vint64m1x4_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i64m1x4_tu(vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tu(vint64m2x4_t vd, const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i64m2x4_tu(vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tu(vuint8mf8x4_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u8mf8x4_tu(vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tu(vuint8mf4x4_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u8mf4x4_tu(vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tu(vuint8mf2x4_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u8mf2x4_tu(vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tu(vuint8m1x4_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u8m1x4_tu(vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tu(vuint8m2x4_t vd, const uint8_t *rs1, + vuint8m2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u8m2x4_tu(vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tu(vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16mf4x4_tu(vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tu(vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16mf2x4_tu(vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tu(vuint16m1x4_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16m1x4_tu(vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tu(vuint16m2x4_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u16m2x4_tu(vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tu(vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u32mf2x4_tu(vd, rs1, 
rs2, vl); } -vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tu(vuint32m1x4_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u32m1x4_tu(vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tu(vuint32m2x4_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u32m2x4_tu(vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tu(vuint64m1x4_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u64m1x4_tu(vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tu(vuint64m2x4_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u64m2x4_tu(vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tum(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tum(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tum(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tum(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tum(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tum(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_f32m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tum(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_f32m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t 
test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tum(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f64m1x4_tum(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tum(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f64m2x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tum(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tum(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tum(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tum(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8m1x4_tum(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tum(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8m2x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tum(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i16mf4x4_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tum(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tum(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i16m1x4_tum(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tum(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i16m2x4_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t 
test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tum(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tum(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i32m1x4_tum(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tum(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i32m2x4_tum(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tum(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i64m1x4_tum(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tum(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i64m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tum(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u8mf8x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tum(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u8mf4x4_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tum(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u8mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tum(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u8m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tum(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u8m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tum(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16mf4x4_tum(vm, vd, rs1, rs2, vl); } 
-vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tum(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tum(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tum(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tum(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u32mf2x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tum(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u32m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tum(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u32m2x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tum(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u64m1x4_tum(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tum(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u64m2x4_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_tumu(vbool64_t vm, + vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_tumu(vbool32_t vm, + vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_tumu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 
*rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_tumu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_tumu(vbool64_t vm, + vfloat32mf2x4_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_tumu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_tumu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_tumu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_tumu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_tumu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_tumu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_tumu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_tumu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_tumu(vbool4_t vm, 
vint8m2x4_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_tumu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_tumu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_tumu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_tumu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_tumu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_tumu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_tumu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_tumu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_tumu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_tumu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u8mf8x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x4_t 
test_vluxseg4ei8_v_u8mf4x4_tumu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u8mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_tumu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u8mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_tumu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u8m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_tumu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u8m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_tumu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16mf4x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_tumu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_tumu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_tumu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_tumu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u32mf2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_tumu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u32m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_tumu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u32m2x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t vm, 
vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_tumu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u64m1x4_tumu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_tumu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u64m2x4_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x4_t test_vluxseg4ei8_v_f16mf4x4_mu(vbool64_t vm, vfloat16mf4x4_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x4_t test_vluxseg4ei8_v_f16mf2x4_mu(vbool32_t vm, vfloat16mf2x4_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x4_t test_vluxseg4ei8_v_f16m1x4_mu(vbool16_t vm, vfloat16m1x4_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, const _Float16 *rs1, vuint8m1_t rs2, size_t vl) { +vfloat16m2x4_t test_vluxseg4ei8_v_f16m2x4_mu(vbool8_t vm, vfloat16m2x4_t vd, + const _Float16 *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f16m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x4_t test_vluxseg4ei8_v_f32mf2x4_mu(vbool64_t vm, vfloat32mf2x4_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_f32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x4_t test_vluxseg4ei8_v_f32m1x4_mu(vbool32_t vm, vfloat32m1x4_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_f32m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, const float *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat32m2x4_t test_vluxseg4ei8_v_f32m2x4_mu(vbool16_t vm, vfloat32m2x4_t vd, + const float *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_f32m2x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x4_t test_vluxseg4ei8_v_f64m1x4_mu(vbool64_t vm, vfloat64m1x4_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_f64m1x4_mu(vm, vd, rs1, rs2, vl); } -vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, const double *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat64m2x4_t test_vluxseg4ei8_v_f64m2x4_mu(vbool32_t vm, vfloat64m2x4_t vd, + const double *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_f64m2x4_mu(vm, vd, rs1, rs2, vl); } 
-vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x4_t test_vluxseg4ei8_v_i8mf8x4_mu(vbool64_t vm, vint8mf8x4_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x4_t test_vluxseg4ei8_v_i8mf4x4_mu(vbool32_t vm, vint8mf4x4_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x4_t test_vluxseg4ei8_v_i8mf2x4_mu(vbool16_t vm, vint8mf2x4_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x4_t test_vluxseg4ei8_v_i8m1x4_mu(vbool8_t vm, vint8m1x4_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8m1x4_mu(vm, vd, rs1, rs2, vl); } -vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, const int8_t *rs1, vuint8m2_t rs2, size_t vl) { +vint8m2x4_t test_vluxseg4ei8_v_i8m2x4_mu(vbool4_t vm, vint8m2x4_t vd, + const int8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i8m2x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x4_t test_vluxseg4ei8_v_i16mf4x4_mu(vbool64_t vm, vint16mf4x4_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x4_t test_vluxseg4ei8_v_i16mf2x4_mu(vbool32_t vm, vint16mf2x4_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x4_t test_vluxseg4ei8_v_i16m1x4_mu(vbool16_t vm, vint16m1x4_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i16m1x4_mu(vm, vd, rs1, rs2, vl); } -vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, const int16_t *rs1, vuint8m1_t rs2, size_t vl) { +vint16m2x4_t test_vluxseg4ei8_v_i16m2x4_mu(vbool8_t vm, vint16m2x4_t vd, + const int16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i16m2x4_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x4_t test_vluxseg4ei8_v_i32mf2x4_mu(vbool64_t vm, vint32mf2x4_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_i32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x4_t test_vluxseg4ei8_v_i32m1x4_mu(vbool32_t vm, vint32m1x4_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i32m1x4_mu(vm, vd, rs1, rs2, vl); } -vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t vm, 
vint32m2x4_t vd, const int32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint32m2x4_t test_vluxseg4ei8_v_i32m2x4_mu(vbool16_t vm, vint32m2x4_t vd, + const int32_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i32m2x4_mu(vm, vd, rs1, rs2, vl); } -vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x4_t test_vluxseg4ei8_v_i64m1x4_mu(vbool64_t vm, vint64m1x4_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i64m1x4_mu(vm, vd, rs1, rs2, vl); } -vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, const int64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint64m2x4_t test_vluxseg4ei8_v_i64m2x4_mu(vbool32_t vm, vint64m2x4_t vd, + const int64_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_i64m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x4_t test_vluxseg4ei8_v_u8mf8x4_mu(vbool64_t vm, vuint8mf8x4_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u8mf8x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x4_t test_vluxseg4ei8_v_u8mf4x4_mu(vbool32_t vm, vuint8mf4x4_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u8mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x4_t test_vluxseg4ei8_v_u8mf2x4_mu(vbool16_t vm, vuint8mf2x4_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u8mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x4_t test_vluxseg4ei8_v_u8m1x4_mu(vbool8_t vm, vuint8m1x4_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u8m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, const uint8_t *rs1, vuint8m2_t rs2, size_t vl) { +vuint8m2x4_t test_vluxseg4ei8_v_u8m2x4_mu(vbool4_t vm, vuint8m2x4_t vd, + const uint8_t *rs1, vuint8m2_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u8m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x4_t test_vluxseg4ei8_v_u16mf4x4_mu(vbool64_t vm, vuint16mf4x4_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16mf4x4_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x4_t test_vluxseg4ei8_v_u16mf2x4_mu(vbool32_t vm, vuint16mf2x4_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x4_t test_vluxseg4ei8_v_u16m1x4_mu(vbool16_t vm, vuint16m1x4_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u16m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t 
vd, const uint16_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint16m2x4_t test_vluxseg4ei8_v_u16m2x4_mu(vbool8_t vm, vuint16m2x4_t vd, + const uint16_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg4ei8_v_u16m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x4_t test_vluxseg4ei8_v_u32mf2x4_mu(vbool64_t vm, vuint32mf2x4_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u32mf2x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x4_t test_vluxseg4ei8_v_u32m1x4_mu(vbool32_t vm, vuint32m1x4_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u32m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, const uint32_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint32m2x4_t test_vluxseg4ei8_v_u32m2x4_mu(vbool16_t vm, vuint32m2x4_t vd, + const uint32_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u32m2x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x4_t test_vluxseg4ei8_v_u64m1x4_mu(vbool64_t vm, vuint64m1x4_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u64m1x4_mu(vm, vd, rs1, rs2, vl); } -vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, const uint64_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint64m2x4_t test_vluxseg4ei8_v_u64m2x4_mu(vbool32_t vm, vuint64m2x4_t vd, + const uint64_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg4ei8_v_u64m2x4_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei16.c index d87bb6883..db7fc7016 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei16.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tu(vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f16mf4x5_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tu(vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f16mf2x5_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tu(vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f16m1x5_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float *rs1,
vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tu(vfloat32mf2x5_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tu(vfloat32m1x5_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tu(vfloat64m1x5_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tu(vint16mf4x5_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tu(vint16mf2x5_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tu(vint32mf2x5_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x5_t 
test_vluxseg5ei16_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tu(vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tu(vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tu(vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tu(vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tu(vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tu(vuint16m1x5_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tu(vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tu(vuint32m1x5_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tu(vuint64m1x5_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tum(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t vm, 
vfloat16mf2x5_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tum(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tum(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei16_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei16_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i16mf2x5_tum(vm, 
vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei16_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t 
*rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_tumu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_tumu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_tumu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t vm, 
vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei16_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return 
__riscv_vluxseg5ei16_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_tumu(vbool64_t vm, + vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_tumu(vbool32_t vm, + vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_tumu(vbool64_t vm, + vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei16_v_f16mf4x5_mu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei16_v_f16mf2x5_mu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, 
vuint16m1_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei16_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei16_v_f32mf2x5_mu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei16_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei16_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei16_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxseg5ei16_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei16_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxseg5ei16_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei16_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei16_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei16_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei16_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei16_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei16_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei16_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei16_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t vm, 
vint32mf2x5_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei16_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei16_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei16_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei16_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei16_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei16_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei16_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei16_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei16_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei16_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei16_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei16_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); 
} -vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei16_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u32m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei16_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei16_v_u64m1x5_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei32.c index 9418dd1eb..2a95b47d4 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei32.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tu(vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f16mf4x5_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tu(vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f16mf2x5_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tu(vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f16m1x5_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tu(vfloat32mf2x5_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tu(vfloat32m1x5_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tu(vfloat64m1x5_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x5_t
test_vluxseg5ei32_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tu(vint16mf4x5_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tu(vint16mf2x5_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tu(vint32mf2x5_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tu(vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tu(vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tu(vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tu(vuint8m1x5_t vd, const 
uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tu(vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tu(vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tu(vuint16m1x5_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tu(vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tu(vuint32m1x5_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tu(vuint64m1x5_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tum(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tum(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tum(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return 
__riscv_vluxseg5ei32_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, 
+ vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t 
vl) { +vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_tumu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_tumu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_tumu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } 
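// The _tu/_tum/_tumu/_mu suffixes cycled through in this file select the RVV
// tail/mask policies: _tu keeps tail elements of vd past vl (unmasked), _tum
// is masked with tail undisturbed and inactive elements agnostic, _tumu keeps
// both tail and inactive elements, and _mu keeps inactive elements only (tail
// agnostic). A minimal usage sketch of the _tumu variant tested just above,
// assuming <riscv_vector.h>; the function and variable names are illustrative,
// only the intrinsic and its signature come from this file:
vint16mf4x5_t update_active(vbool64_t vm, vint16mf4x5_t acc,
                            const int16_t *records, vuint32mf2_t byte_offsets,
                            size_t vl) {
  // Gathers five int16 fields per segment at the given byte offsets; lanes
  // with vm clear and lanes at or past vl keep their old values from acc.
  return __riscv_vluxseg5ei32_v_i16mf4x5_tumu(vm, acc, records, byte_offsets,
                                              vl);
}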
-vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_tumu(vbool64_t vm, + vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_tumu(vbool32_t vm, + vuint16mf2x5_t vd, + const 
uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_tumu(vbool64_t vm, + vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei32_v_f16mf4x5_mu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei32_v_f16mf2x5_mu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei32_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei32_v_f32mf2x5_mu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei32_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei32_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, 
const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei32_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei32_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei32_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei32_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei32_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei32_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei32_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei32_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei32_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei32_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei32_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t vm, 
vuint8mf4x5_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei32_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei32_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei32_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg5ei32_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei32_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei32_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei32_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei32_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei32_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u32m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei32_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei32_v_u64m1x5_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei64.c index 73f5186d8..9e7c03347 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei64.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s 
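// The header above is the usual LLVM lit harness for these auto-generated
// tests: REQUIRES skips the test unless the RISC-V backend is built,
// %clang_cc1 emits LLVM IR, and opt -passes=mem2reg promotes allocas so that
// FileCheck can match stable CHECK-RV64 patterns. The RUN-line change in this
// diff drops the experimental- prefix from zvfh, which is no longer an
// experimental extension in current LLVM.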
#include <riscv_vector.h> -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tu(vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f16mf4x5_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tu(vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f16mf2x5_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tu(vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f16m1x5_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tu(vfloat32mf2x5_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tu(vfloat32m1x5_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tu(vfloat64m1x5_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tu(vint16mf4x5_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tu(vint16mf2x5_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i16mf2x5_tu(vd, rs1, rs2, vl); }
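// With 64-bit indices (ei64) the index vector must hold as many elements as
// the data vector, so its LMUL scales with the 64/SEW ratio: i8m1 data pairs
// with vuint64m8_t offsets, i16m1 with vuint64m4_t, and f64m1 with
// vuint64m1_t, exactly as in the signatures above. A tail-undisturbed sketch
// under the same assumptions as before (illustrative names, <riscv_vector.h>
// included):
vint16m1x5_t first_fields(vint16m1x5_t vd, const int16_t *base,
                          vuint64m4_t byte_offsets, size_t vl) {
  // Unmasked segment gather of five int16 fields; elements at positions vl
  // and beyond keep their previous contents from vd (tail undisturbed).
  return __riscv_vluxseg5ei64_v_i16m1x5_tu(vd, base, byte_offsets, vl);
}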
-vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tu(vint32mf2x5_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tu(vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tu(vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tu(vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tu(vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tu(vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tu(vuint16m1x5_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tu(vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t 
test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tu(vuint32m1x5_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tu(vuint64m1x5_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tum(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tum(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tum(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_i8mf2x5_tum(vm, vd, rs1, 
rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return 
__riscv_vluxseg5ei64_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_tumu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_tumu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_tumu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint64m2_t 
rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t 
test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_tumu(vbool64_t vm, + vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_tumu(vbool32_t vm, + vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_tumu(vbool64_t vm, + vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, + const 
uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei64_v_f16mf4x5_mu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei64_v_f16mf2x5_mu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei64_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei64_v_f32mf2x5_mu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei64_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei64_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei64_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei64_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei64_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei64_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x5_t 
test_vluxseg5ei64_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei64_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei64_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei64_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei64_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei64_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei64_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei64_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei64_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei64_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg5ei64_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei64_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint64m2_t 
rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei64_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei64_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei64_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei64_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u32m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei64_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg5ei64_v_u64m1x5_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei8.c index 2a8c95a19..57b3dc986 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg5ei8.c @@ -1,423 +1,629 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tu(vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f16mf4x5_tu(vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tu(vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f16mf2x5_tu(vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tu(vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f16m1x5_tu(vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tu(vfloat32mf2x5_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f32mf2x5_tu(vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x5_t
test_vluxseg5ei8_v_f32m1x5_tu(vfloat32m1x5_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_f32m1x5_tu(vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tu(vfloat64m1x5_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_f64m1x5_tu(vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tu(vint8mf8x5_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i8mf8x5_tu(vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tu(vint8mf4x5_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i8mf4x5_tu(vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tu(vint8mf2x5_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i8mf2x5_tu(vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tu(vint8m1x5_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i8m1x5_tu(vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tu(vint16mf4x5_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i16mf4x5_tu(vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tu(vint16mf2x5_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i16mf2x5_tu(vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tu(vint16m1x5_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i16m1x5_tu(vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tu(vint32mf2x5_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i32mf2x5_tu(vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tu(vint32m1x5_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i32m1x5_tu(vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tu(vint64m1x5_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i64m1x5_tu(vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tu(vuint8mf8x5_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return 
__riscv_vluxseg5ei8_v_u8mf8x5_tu(vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tu(vuint8mf4x5_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_u8mf4x5_tu(vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tu(vuint8mf2x5_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_u8mf2x5_tu(vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tu(vuint8m1x5_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u8m1x5_tu(vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tu(vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u16mf4x5_tu(vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tu(vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u16mf2x5_tu(vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tu(vuint16m1x5_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u16m1x5_tu(vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tu(vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u32mf2x5_tu(vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tu(vuint32m1x5_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u32m1x5_tu(vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tu(vuint64m1x5_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u64m1x5_tu(vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tum(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tum(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x5_t 
test_vluxseg5ei8_v_f16m1x5_tum(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f16m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tum(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tum(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_f32m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tum(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f64m1x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tum(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tum(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tum(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tum(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i8m1x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tum(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tum(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tum(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i16m1x5_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { 
+vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tum(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tum(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i32m1x5_tum(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tum(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i64m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tum(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u8mf8x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tum(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u8mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tum(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u8mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tum(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_u8m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tum(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u16mf4x5_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tum(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u16mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tum(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u16m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tum(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u32mf2x5_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, const 
uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tum(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u32m1x5_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tum(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u64m1x5_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_tumu(vbool64_t vm, + vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_tumu(vbool32_t vm, + vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_tumu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_tumu(vbool64_t vm, + vfloat32mf2x5_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_tumu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_tumu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_tumu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_tumu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_tumu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i8mf2x5_tumu(vm, vd, rs1, rs2, 
vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_tumu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_tumu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_tumu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_tumu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_tumu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_tumu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_tumu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_tumu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u8mf8x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_tumu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u8mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_tumu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u8mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_tumu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return 
__riscv_vluxseg5ei8_v_u8m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_tumu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u16mf4x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_tumu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u16mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_tumu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u16m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_tumu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u32mf2x5_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_tumu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u32m1x5_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_tumu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u64m1x5_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x5_t test_vluxseg5ei8_v_f16mf4x5_mu(vbool64_t vm, vfloat16mf4x5_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x5_t test_vluxseg5ei8_v_f16mf2x5_mu(vbool32_t vm, vfloat16mf2x5_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x5_t test_vluxseg5ei8_v_f16m1x5_mu(vbool16_t vm, vfloat16m1x5_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f16m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x5_t test_vluxseg5ei8_v_f32mf2x5_mu(vbool64_t vm, vfloat32mf2x5_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_f32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x5_t test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x5_t 
test_vluxseg5ei8_v_f32m1x5_mu(vbool32_t vm, vfloat32m1x5_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_f32m1x5_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x5_t test_vluxseg5ei8_v_f64m1x5_mu(vbool64_t vm, vfloat64m1x5_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_f64m1x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x5_t test_vluxseg5ei8_v_i8mf8x5_mu(vbool64_t vm, vint8mf8x5_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x5_t test_vluxseg5ei8_v_i8mf4x5_mu(vbool32_t vm, vint8mf4x5_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x5_t test_vluxseg5ei8_v_i8mf2x5_mu(vbool16_t vm, vint8mf2x5_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x5_t test_vluxseg5ei8_v_i8m1x5_mu(vbool8_t vm, vint8m1x5_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i8m1x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x5_t test_vluxseg5ei8_v_i16mf4x5_mu(vbool64_t vm, vint16mf4x5_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x5_t test_vluxseg5ei8_v_i16mf2x5_mu(vbool32_t vm, vint16mf2x5_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x5_t test_vluxseg5ei8_v_i16m1x5_mu(vbool16_t vm, vint16m1x5_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i16m1x5_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x5_t test_vluxseg5ei8_v_i32mf2x5_mu(vbool64_t vm, vint32mf2x5_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_i32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x5_t test_vluxseg5ei8_v_i32m1x5_mu(vbool32_t vm, vint32m1x5_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i32m1x5_mu(vm, vd, rs1, rs2, vl); } -vint64m1x5_t test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x5_t 
test_vluxseg5ei8_v_i64m1x5_mu(vbool64_t vm, vint64m1x5_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_i64m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x5_t test_vluxseg5ei8_v_u8mf8x5_mu(vbool64_t vm, vuint8mf8x5_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_u8mf8x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x5_t test_vluxseg5ei8_v_u8mf4x5_mu(vbool32_t vm, vuint8mf4x5_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_u8mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x5_t test_vluxseg5ei8_v_u8mf2x5_mu(vbool16_t vm, vuint8mf2x5_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_u8mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x5_t test_vluxseg5ei8_v_u8m1x5_mu(vbool8_t vm, vuint8m1x5_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg5ei8_v_u8m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x5_t test_vluxseg5ei8_v_u16mf4x5_mu(vbool64_t vm, vuint16mf4x5_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u16mf4x5_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x5_t test_vluxseg5ei8_v_u16mf2x5_mu(vbool32_t vm, vuint16mf2x5_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u16mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x5_t test_vluxseg5ei8_v_u16m1x5_mu(vbool16_t vm, vuint16m1x5_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u16m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x5_t test_vluxseg5ei8_v_u32mf2x5_mu(vbool64_t vm, vuint32mf2x5_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u32mf2x5_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x5_t test_vluxseg5ei8_v_u32m1x5_mu(vbool32_t vm, vuint32m1x5_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u32m1x5_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x5_t test_vluxseg5ei8_v_u64m1x5_mu(vbool64_t vm, vuint64m1x5_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg5ei8_v_u64m1x5_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei16.c 
b/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei16.c index 5bc090112..e74a7cd0b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei16.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tu(vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f16mf4x6_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tu(vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f16mf2x6_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tu(vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f16m1x6_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tu(vfloat32mf2x6_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f32mf2x6_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tu(vfloat32m1x6_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f32m1x6_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tu(vfloat64m1x6_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f64m1x6_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i8m1x6_tu(vd, rs1, rs2, vl); }
-vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tu(vint16mf4x6_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tu(vint16mf2x6_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tu(vint32mf2x6_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tu(vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tu(vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tu(vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tu(vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tu(vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u16mf2x6_tu(vd, rs1, rs2, vl); } 
-vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tu(vuint16m1x6_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tu(vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tu(vuint32m1x6_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tu(vuint64m1x6_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tum(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tum(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tum(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } 
-vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei16_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei16_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return 
__riscv_vluxseg6ei16_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei16_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_tumu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_tumu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) 
{ +vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_tumu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei16_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t 
test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_tumu(vbool64_t vm, + vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_tumu(vbool32_t vm, + vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_tumu(vbool64_t vm, + vuint32mf2x6_t vd, + 
const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei16_v_f16mf4x6_mu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei16_v_f16mf2x6_mu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei16_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei16_v_f32mf2x6_mu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei16_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei16_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei16_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxseg6ei16_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei16_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxseg6ei16_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, 
vuint16m1_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei16_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei16_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei16_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei16_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei16_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei16_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei16_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei16_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei16_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei16_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei16_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei16_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei16_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei16_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, 
const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei16_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei16_v_u8m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei16_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei16_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei16_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u16m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei16_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei16_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u32m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei16_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei16_v_u64m1x6_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei32.c index 6ad444b79..f5c4a8379 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei32.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tu(vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f16mf4x6_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tu(vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f16mf2x6_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t
test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tu(vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f16m1x6_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tu(vfloat32mf2x6_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f32mf2x6_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tu(vfloat32m1x6_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_f32m1x6_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tu(vfloat64m1x6_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f64m1x6_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tu(vint16mf4x6_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tu(vint16mf2x6_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tu(vint32mf2x6_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t 
test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tu(vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tu(vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tu(vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tu(vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tu(vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tu(vuint16m1x6_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tu(vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tu(vuint32m1x6_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tu(vuint64m1x6_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t 
test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tum(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tum(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tum(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t 
vl) { return __riscv_vluxseg6ei32_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x6_t 
test_vluxseg6ei32_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_tumu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_tumu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_tumu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f64m1x6_tumu(vm, vd, 
rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + 
vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_tumu(vbool64_t vm, + vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_tumu(vbool32_t vm, + vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_tumu(vbool64_t vm, + vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei32_v_f16mf4x6_mu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const 
_Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei32_v_f16mf2x6_mu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei32_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei32_v_f32mf2x6_mu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei32_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei32_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei32_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei32_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei32_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei32_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei32_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei32_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t 
test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei32_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei32_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei32_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei32_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei32_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei32_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei32_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei32_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg6ei32_v_u8m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei32_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei32_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei32_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return 
__riscv_vluxseg6ei32_v_u16m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei32_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei32_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u32m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei32_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei32_v_u64m1x6_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei64.c index 47ecf4875..9cbc1cadf 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei64.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tu(vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f16mf4x6_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tu(vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f16mf2x6_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tu(vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f16m1x6_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tu(vfloat32mf2x6_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f32mf2x6_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tu(vfloat32m1x6_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_f32m1x6_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tu(vfloat64m1x6_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f64m1x6_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t
test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tu(vint16mf4x6_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tu(vint16mf2x6_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tu(vint32mf2x6_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tu(vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tu(vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, 
vuint64m4_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tu(vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tu(vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tu(vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tu(vuint16m1x6_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tu(vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tu(vuint32m1x6_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tu(vuint64m1x6_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tum(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tum(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tum(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + 
vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tum(vbool32_t vm, 
vint32m1x6_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, 
vuint64m1_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_tumu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_tumu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_tumu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_i8m1x6_tumu(vm, vd, rs1, 
rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_tumu(vbool64_t vm, + vuint16mf4x6_t vd, + const 
uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_tumu(vbool32_t vm, + vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_tumu(vbool64_t vm, + vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei64_v_f16mf4x6_mu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei64_v_f16mf2x6_mu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei64_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei64_v_f32mf2x6_mu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei64_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t 
vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei64_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei64_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei64_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei64_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei64_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei64_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei64_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei64_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei64_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei64_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei64_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, 
const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei64_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei64_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei64_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei64_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg6ei64_v_u8m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei64_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei64_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei64_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u16m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei64_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei64_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u32m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei64_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg6ei64_v_u64m1x6_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei8.c index 958706261..57ba41d84 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg6ei8.c @@ -1,423 +1,629 @@ // REQUIRES: riscv-registered-target // 
RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tu(vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f16mf4x6_tu(vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tu(vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f16mf2x6_tu(vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tu(vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f16m1x6_tu(vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tu(vfloat32mf2x6_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f32mf2x6_tu(vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tu(vfloat32m1x6_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_f32m1x6_tu(vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tu(vfloat64m1x6_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_f64m1x6_tu(vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tu(vint8mf8x6_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i8mf8x6_tu(vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tu(vint8mf4x6_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i8mf4x6_tu(vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tu(vint8mf2x6_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i8mf2x6_tu(vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tu(vint8m1x6_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i8m1x6_tu(vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tu(vint16mf4x6_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i16mf4x6_tu(vd, rs1, rs2, vl); } -vint16mf2x6_t
test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tu(vint16mf2x6_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i16mf2x6_tu(vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tu(vint16m1x6_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i16m1x6_tu(vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tu(vint32mf2x6_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i32mf2x6_tu(vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tu(vint32m1x6_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i32m1x6_tu(vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tu(vint64m1x6_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i64m1x6_tu(vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tu(vuint8mf8x6_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_u8mf8x6_tu(vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tu(vuint8mf4x6_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_u8mf4x6_tu(vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tu(vuint8mf2x6_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_u8mf2x6_tu(vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tu(vuint8m1x6_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u8m1x6_tu(vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tu(vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u16mf4x6_tu(vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tu(vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u16mf2x6_tu(vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tu(vuint16m1x6_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u16m1x6_tu(vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t vd, const uint32_t 
*rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tu(vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u32mf2x6_tu(vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tu(vuint32m1x6_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u32m1x6_tu(vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tu(vuint64m1x6_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u64m1x6_tu(vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tum(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tum(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tum(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f16m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tum(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tum(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_f32m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tum(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f64m1x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tum(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tum(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t 
rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tum(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tum(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i8m1x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tum(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tum(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tum(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i16m1x6_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tum(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tum(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i32m1x6_tum(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tum(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i64m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tum(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u8mf8x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tum(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u8mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tum(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u8mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, 
vuint8m1_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tum(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_u8m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tum(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u16mf4x6_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tum(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u16mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tum(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u16m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tum(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u32mf2x6_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tum(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u32m1x6_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tum(vbool64_t vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u64m1x6_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_tumu(vbool64_t vm, + vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_tumu(vbool32_t vm, + vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_tumu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_tumu(vbool64_t vm, + vfloat32mf2x6_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f32mf2x6_tumu(vm, vd, 
rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_tumu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_tumu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_tumu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_tumu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_tumu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_tumu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_tumu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_tumu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_tumu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_tumu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_tumu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return 
__riscv_vluxseg6ei8_v_i32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_tumu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_tumu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u8mf8x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_tumu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u8mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_tumu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u8mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_tumu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_u8m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_tumu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u16mf4x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_tumu(vbool32_t vm, vuint16mf2x6_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u16mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_tumu(vbool16_t vm, vuint16m1x6_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u16m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_tumu(vbool64_t vm, vuint32mf2x6_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u32mf2x6_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_tumu(vbool32_t vm, vuint32m1x6_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u32m1x6_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_tumu(vbool64_t 
vm, vuint64m1x6_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u64m1x6_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x6_t test_vluxseg6ei8_v_f16mf4x6_mu(vbool64_t vm, vfloat16mf4x6_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x6_t test_vluxseg6ei8_v_f16mf2x6_mu(vbool32_t vm, vfloat16mf2x6_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x6_t test_vluxseg6ei8_v_f16m1x6_mu(vbool16_t vm, vfloat16m1x6_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f16m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x6_t test_vluxseg6ei8_v_f32mf2x6_mu(vbool64_t vm, vfloat32mf2x6_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_f32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x6_t test_vluxseg6ei8_v_f32m1x6_mu(vbool32_t vm, vfloat32m1x6_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_f32m1x6_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x6_t test_vluxseg6ei8_v_f64m1x6_mu(vbool64_t vm, vfloat64m1x6_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_f64m1x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x6_t test_vluxseg6ei8_v_i8mf8x6_mu(vbool64_t vm, vint8mf8x6_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x6_t test_vluxseg6ei8_v_i8mf4x6_mu(vbool32_t vm, vint8mf4x6_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x6_t test_vluxseg6ei8_v_i8mf2x6_mu(vbool16_t vm, vint8mf2x6_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x6_t test_vluxseg6ei8_v_i8m1x6_mu(vbool8_t vm, vint8m1x6_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i8m1x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x6_t test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x6_t 
test_vluxseg6ei8_v_i16mf4x6_mu(vbool64_t vm, vint16mf4x6_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x6_t test_vluxseg6ei8_v_i16mf2x6_mu(vbool32_t vm, vint16mf2x6_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i16mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x6_t test_vluxseg6ei8_v_i16m1x6_mu(vbool16_t vm, vint16m1x6_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i16m1x6_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x6_t test_vluxseg6ei8_v_i32mf2x6_mu(vbool64_t vm, vint32mf2x6_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_i32mf2x6_mu(vm, vd, rs1, rs2, vl); } -vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x6_t test_vluxseg6ei8_v_i32m1x6_mu(vbool32_t vm, vint32m1x6_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i32m1x6_mu(vm, vd, rs1, rs2, vl); } -vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x6_t test_vluxseg6ei8_v_i64m1x6_mu(vbool64_t vm, vint64m1x6_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_i64m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x6_t test_vluxseg6ei8_v_u8mf8x6_mu(vbool64_t vm, vuint8mf8x6_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_u8mf8x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x6_t test_vluxseg6ei8_v_u8mf4x6_mu(vbool32_t vm, vuint8mf4x6_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_u8mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x6_t test_vluxseg6ei8_v_u8mf2x6_mu(vbool16_t vm, vuint8mf2x6_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_u8mf2x6_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x6_t test_vluxseg6ei8_v_u8m1x6_mu(vbool8_t vm, vuint8m1x6_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg6ei8_v_u8m1x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x6_t test_vluxseg6ei8_v_u16mf4x6_mu(vbool64_t vm, vuint16mf4x6_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg6ei8_v_u16mf4x6_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x6_t test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x6_t 
test_vluxseg6ei8_v_u16mf2x6_mu(vbool32_t vm, vuint16mf2x6_t vd,
+                                              const uint16_t *rs1,
+                                              vuint8mf4_t rs2, size_t vl) {
   return __riscv_vluxseg6ei8_v_u16mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) {
+vuint16m1x6_t test_vluxseg6ei8_v_u16m1x6_mu(vbool16_t vm, vuint16m1x6_t vd,
+                                            const uint16_t *rs1,
+                                            vuint8mf2_t rs2, size_t vl) {
   return __riscv_vluxseg6ei8_v_u16m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint32mf2x6_t test_vluxseg6ei8_v_u32mf2x6_mu(vbool64_t vm, vuint32mf2x6_t vd,
+                                              const uint32_t *rs1,
+                                              vuint8mf8_t rs2, size_t vl) {
   return __riscv_vluxseg6ei8_v_u32mf2x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) {
+vuint32m1x6_t test_vluxseg6ei8_v_u32m1x6_mu(vbool32_t vm, vuint32m1x6_t vd,
+                                            const uint32_t *rs1,
+                                            vuint8mf4_t rs2, size_t vl) {
   return __riscv_vluxseg6ei8_v_u32m1x6_mu(vm, vd, rs1, rs2, vl);
 }
 
-vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) {
+vuint64m1x6_t test_vluxseg6ei8_v_u64m1x6_mu(vbool64_t vm, vuint64m1x6_t vd,
+                                            const uint64_t *rs1,
+                                            vuint8mf8_t rs2, size_t vl) {
   return __riscv_vluxseg6ei8_v_u64m1x6_mu(vm, vd, rs1, rs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei16.c
index 013d1222b..bc5521e82 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei16.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei16.c
@@ -1,423 +1,635 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) {
+vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tu(vfloat16mf4x7_t vd,
+                                                const _Float16 *rs1,
+                                                vuint16mf4_t rs2, size_t vl) {
   return __riscv_vluxseg7ei16_v_f16mf4x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) {
+vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tu(vfloat16mf2x7_t vd,
+                                                const _Float16 *rs1,
+                                                vuint16mf2_t rs2, size_t vl) {
   return __riscv_vluxseg7ei16_v_f16mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) {
+vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tu(vfloat16m1x7_t vd,
+                                              const _Float16 *rs1,
+                                              vuint16m1_t rs2, size_t vl) {
   return __riscv_vluxseg7ei16_v_f16m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) {
+vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tu(vfloat32mf2x7_t vd,
+                                                const float *rs1,
+                                                vuint16mf4_t rs2, size_t vl) {
   return __riscv_vluxseg7ei16_v_f32mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) {
+vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tu(vfloat32m1x7_t vd,
+
const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f32m1x7_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tu(vfloat64m1x7_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f64m1x7_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i8mf8x7_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i8mf4x7_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i8mf2x7_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i8m1x7_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tu(vint16mf4x7_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i16mf4x7_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tu(vint16mf2x7_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i16mf2x7_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i16m1x7_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tu(vint32mf2x7_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i32mf2x7_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tu(vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { 
return __riscv_vluxseg7ei16_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tu(vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tu(vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tu(vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u16mf4x7_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tu(vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tu(vuint16m1x7_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tu(vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tu(vuint32m1x7_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tu(vuint64m1x7_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tum(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tum(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint16m1_t rs2, 
size_t vl) { +vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f16m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tum(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f32m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei16_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei16_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i16m1x7_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t 
test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i32m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei16_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u16m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, 
size_t vl) { return __riscv_vluxseg7ei16_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u32m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_tumu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_tumu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_tumu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, 
vuint16m1_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei16_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl); } 
-vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_tumu(vbool64_t vm, + vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_tumu(vbool32_t vm, + vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_tumu(vbool64_t vm, + vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei16_v_f16mf4x7_mu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei16_v_f16mf2x7_mu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei16_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f16m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x7_t 
test_vluxseg7ei16_v_f32mf2x7_mu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei16_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f32m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei16_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_f64m1x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei16_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxseg7ei16_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei16_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxseg7ei16_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei16_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei16_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei16_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei16_v_i8m1x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei16_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei16_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei16_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei16_v_i16m1x7_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei16_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint16mf2_t rs2, 
size_t vl) { +vint32m1x7_t test_vluxseg7ei16_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i32m1x7_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei16_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_i64m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei16_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei16_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei16_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei16_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei16_v_u8m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei16_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei16_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei16_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u16m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei16_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei16_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei16_v_u32m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t 
test_vluxseg7ei16_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) {
+vuint64m1x7_t test_vluxseg7ei16_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd,
+                                             const uint64_t *rs1,
+                                             vuint16mf4_t rs2, size_t vl) {
   return __riscv_vluxseg7ei16_v_u64m1x7_mu(vm, vd, rs1, rs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei32.c
index bb2f0b841..0dbb9fcce 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei32.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei32.c
@@ -1,423 +1,635 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) {
+vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tu(vfloat16mf4x7_t vd,
+                                                const _Float16 *rs1,
+                                                vuint32mf2_t rs2, size_t vl) {
   return __riscv_vluxseg7ei32_v_f16mf4x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) {
+vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tu(vfloat16mf2x7_t vd,
+                                                const _Float16 *rs1,
+                                                vuint32m1_t rs2, size_t vl) {
   return __riscv_vluxseg7ei32_v_f16mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) {
+vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tu(vfloat16m1x7_t vd,
+                                              const _Float16 *rs1,
+                                              vuint32m2_t rs2, size_t vl) {
   return __riscv_vluxseg7ei32_v_f16m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) {
+vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tu(vfloat32mf2x7_t vd,
+                                                const float *rs1,
+                                                vuint32mf2_t rs2, size_t vl) {
   return __riscv_vluxseg7ei32_v_f32mf2x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) {
+vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tu(vfloat32m1x7_t vd,
+                                              const float *rs1, vuint32m1_t rs2,
+                                              size_t vl) {
   return __riscv_vluxseg7ei32_v_f32m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) {
+vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tu(vfloat64m1x7_t vd,
+                                              const double *rs1,
+                                              vuint32mf2_t rs2, size_t vl) {
   return __riscv_vluxseg7ei32_v_f64m1x7_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) {
+vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1,
+                                            vuint32mf2_t rs2, size_t vl) {
   return __riscv_vluxseg7ei32_v_i8mf8x7_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) {
+vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1,
+                                            vuint32m1_t rs2, size_t vl) {
   return __riscv_vluxseg7ei32_v_i8mf4x7_tu(vd, rs1, rs2, vl);
 }
 
-vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) {
+vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1,
+
vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i8mf2x7_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i8m1x7_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tu(vint16mf4x7_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i16mf4x7_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tu(vint16mf2x7_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i16mf2x7_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i16m1x7_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tu(vint32mf2x7_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i32mf2x7_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tu(vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tu(vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tu(vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tu(vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return 
__riscv_vluxseg7ei32_v_u16mf4x7_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tu(vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tu(vuint16m1x7_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tu(vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tu(vuint32m1x7_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tu(vuint64m1x7_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tum(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tum(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f16m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tum(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f32m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return 
__riscv_vluxseg7ei32_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i16m1x7_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i32m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + 
vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u16m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u32m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_tumu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, 
vuint32m1_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_tumu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_tumu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i16mf2x7_tumu(vm, vd, rs1, rs2, 
vl); } -vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_tumu(vbool64_t vm, + vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_tumu(vbool32_t vm, + vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, + 
const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_tumu(vbool64_t vm, + vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei32_v_f16mf4x7_mu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei32_v_f16mf2x7_mu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei32_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f16m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei32_v_f32mf2x7_mu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei32_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_f32m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei32_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_f64m1x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei32_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const 
int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei32_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei32_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei32_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_i8m1x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei32_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei32_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei32_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_i16m1x7_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei32_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei32_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_i32m1x7_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei32_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_i64m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei32_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei32_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t vm, 
vuint8mf2x7_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei32_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei32_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg7ei32_v_u8m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei32_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei32_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei32_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u16m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei32_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei32_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u32m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei32_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei32_v_u64m1x7_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei64.c index 8d13adbe7..a73ef59d3 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei64.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include -vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tu(vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f16mf4x7_tu(vd, rs1, rs2, vl); } 
-vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tu(vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f16mf2x7_tu(vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tu(vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f16m1x7_tu(vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tu(vfloat32mf2x7_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f32mf2x7_tu(vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tu(vfloat32m1x7_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_f32m1x7_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tu(vfloat64m1x7_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f64m1x7_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i8mf8x7_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i8mf4x7_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i8mf2x7_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i8m1x7_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tu(vint16mf4x7_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i16mf4x7_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tu(vint16mf2x7_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i16mf2x7_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i16m1x7_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t 
test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tu(vint32mf2x7_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i32mf2x7_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tu(vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tu(vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8mf4x7_tu(vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tu(vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tu(vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u16mf4x7_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tu(vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tu(vuint16m1x7_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tu(vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tu(vuint32m1x7_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t 
test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tu(vuint64m1x7_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tum(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tum(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f16m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tum(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f32m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return 
__riscv_vluxseg7ei64_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i16m1x7_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i32m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, + 
const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u16m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u32m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_tumu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_tumu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_tumu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t 
vm, vfloat64m1x7_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i64m1x7_tumu(vm, vd, rs1, rs2, 
vl); } -vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_tumu(vbool64_t vm, + vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_tumu(vbool32_t vm, + vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_tumu(vbool64_t vm, + vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei64_v_f16mf4x7_mu(vbool64_t vm, 
+ vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei64_v_f16mf2x7_mu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei64_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f16m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei64_v_f32mf2x7_mu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei64_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_f32m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei64_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_f64m1x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei64_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei64_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei64_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei64_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_i8m1x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei64_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x7_t 
test_vluxseg7ei64_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei64_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_i16m1x7_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei64_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei64_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_i32m1x7_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei64_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_i64m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei64_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei64_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei64_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei64_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg7ei64_v_u8m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei64_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei64_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, 
vuint64m4_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei64_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, +                                             const uint16_t *rs1, +                                             vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u16m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei64_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, +                                               const uint32_t *rs1, +                                               vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei64_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, +                                             const uint32_t *rs1, +                                             vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u32m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei64_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, +                                             const uint64_t *rs1, +                                             vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg7ei64_v_u64m1x7_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei8.c index d5c7b0730..214016b55 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg7ei8.c @@ -1,423 +1,629 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tu(vfloat16mf4x7_t vd, +                                               const _Float16 *rs1, +                                               vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f16mf4x7_tu(vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tu(vfloat16mf2x7_t vd, +                                               const _Float16 *rs1, +                                               vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f16mf2x7_tu(vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tu(vfloat16m1x7_t vd, +                                             const _Float16 *rs1, +                                             vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f16m1x7_tu(vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tu(vfloat32mf2x7_t vd, +                                               const float *rs1, +                                               vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f32mf2x7_tu(vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tu(vfloat32m1x7_t vd, +                                             const float *rs1, vuint8mf4_t rs2, +                                             size_t vl) { return __riscv_vluxseg7ei8_v_f32m1x7_tu(vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tu(vfloat64m1x7_t vd, +                                             const
double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_f64m1x7_tu(vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tu(vint8mf8x7_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i8mf8x7_tu(vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tu(vint8mf4x7_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i8mf4x7_tu(vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tu(vint8mf2x7_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i8mf2x7_tu(vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tu(vint8m1x7_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i8m1x7_tu(vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tu(vint16mf4x7_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i16mf4x7_tu(vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tu(vint16mf2x7_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i16mf2x7_tu(vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tu(vint16m1x7_t vd, const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i16m1x7_tu(vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tu(vint32mf2x7_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i32mf2x7_tu(vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tu(vint32m1x7_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i32m1x7_tu(vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tu(vint64m1x7_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i64m1x7_tu(vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tu(vuint8mf8x7_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_u8mf8x7_tu(vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tu(vuint8mf4x7_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_u8mf4x7_tu(vd, rs1, rs2, vl); } 
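// ---------------------------------------------------------------------------
// [Editor's note] The hunks in this patch only reflow the auto-generated
// tests under clang-format; the intrinsic signatures are unchanged. As a
// hedged usage sketch (not part of the generated suite): the _tu ("tail
// undisturbed") variants shown above take the destination tuple vd as the
// first operand and leave the tail elements of vd intact. The tuple
// extraction intrinsic __riscv_vget_v_u8mf4x7_u8mf4 below is assumed from
// the same intrinsics API.
#include <riscv_vector.h>

// Gather seven interleaved u8 fields via byte indices, then return field 0.
static vuint8mf4_t first_field_u8mf4x7(vuint8mf4x7_t vd, const uint8_t *base,
                                       vuint8mf4_t idx, size_t n) {
  size_t vl = __riscv_vsetvl_e8mf4(n); // number of elements actually processed
  vuint8mf4x7_t seg =
      __riscv_vluxseg7ei8_v_u8mf4x7_tu(vd, base, idx, vl); // indexed load, tu
  return __riscv_vget_v_u8mf4x7_u8mf4(seg, 0); // extract segment field 0
}
// ---------------------------------------------------------------------------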
-vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tu(vuint8mf2x7_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_u8mf2x7_tu(vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tu(vuint8m1x7_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u8m1x7_tu(vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tu(vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u16mf4x7_tu(vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tu(vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u16mf2x7_tu(vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tu(vuint16m1x7_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u16m1x7_tu(vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tu(vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u32mf2x7_tu(vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tu(vuint32m1x7_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u32m1x7_tu(vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tu(vuint64m1x7_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u64m1x7_tu(vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tum(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tum(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tum(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f16m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tum(vbool64_t 
vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tum(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_f32m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tum(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f64m1x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tum(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tum(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tum(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tum(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i8m1x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tum(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tum(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tum(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i16m1x7_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tum(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x7_t 
test_vluxseg7ei8_v_i32m1x7_tum(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i32m1x7_tum(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tum(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i64m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tum(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u8mf8x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tum(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u8mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tum(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u8mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tum(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_u8m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tum(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u16mf4x7_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tum(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u16mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tum(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u16m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tum(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u32mf2x7_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tum(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u32m1x7_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, const uint64_t 
*rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tum(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u64m1x7_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_tumu(vbool64_t vm, + vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_tumu(vbool32_t vm, + vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_tumu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_tumu(vbool64_t vm, + vfloat32mf2x7_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_tumu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_tumu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_tumu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_tumu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_tumu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_tumu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t 
test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_tumu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_tumu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_tumu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_tumu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_tumu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_tumu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_tumu(vbool64_t vm, vuint8mf8x7_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u8mf8x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_tumu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u8mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_tumu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u8mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_tumu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_u8m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_tumu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return 
__riscv_vluxseg7ei8_v_u16mf4x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_tumu(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u16mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_tumu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u16m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_tumu(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u32mf2x7_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_tumu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u32m1x7_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_tumu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u64m1x7_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x7_t test_vluxseg7ei8_v_f16mf4x7_mu(vbool64_t vm, vfloat16mf4x7_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x7_t test_vluxseg7ei8_v_f16mf2x7_mu(vbool32_t vm, vfloat16mf2x7_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x7_t test_vluxseg7ei8_v_f16m1x7_mu(vbool16_t vm, vfloat16m1x7_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f16m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x7_t test_vluxseg7ei8_v_f32mf2x7_mu(vbool64_t vm, vfloat32mf2x7_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_f32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x7_t test_vluxseg7ei8_v_f32m1x7_mu(vbool32_t vm, vfloat32m1x7_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_f32m1x7_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x7_t test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x7_t 
test_vluxseg7ei8_v_f64m1x7_mu(vbool64_t vm, vfloat64m1x7_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_f64m1x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x7_t test_vluxseg7ei8_v_i8mf8x7_mu(vbool64_t vm, vint8mf8x7_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x7_t test_vluxseg7ei8_v_i8mf4x7_mu(vbool32_t vm, vint8mf4x7_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x7_t test_vluxseg7ei8_v_i8mf2x7_mu(vbool16_t vm, vint8mf2x7_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x7_t test_vluxseg7ei8_v_i8m1x7_mu(vbool8_t vm, vint8m1x7_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i8m1x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x7_t test_vluxseg7ei8_v_i16mf4x7_mu(vbool64_t vm, vint16mf4x7_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x7_t test_vluxseg7ei8_v_i16mf2x7_mu(vbool32_t vm, vint16mf2x7_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x7_t test_vluxseg7ei8_v_i16m1x7_mu(vbool16_t vm, vint16m1x7_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i16m1x7_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x7_t test_vluxseg7ei8_v_i32mf2x7_mu(vbool64_t vm, vint32mf2x7_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_i32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x7_t test_vluxseg7ei8_v_i32m1x7_mu(vbool32_t vm, vint32m1x7_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i32m1x7_mu(vm, vd, rs1, rs2, vl); } -vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x7_t test_vluxseg7ei8_v_i64m1x7_mu(vbool64_t vm, vint64m1x7_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_i64m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t vm, vuint8mf8x7_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x7_t test_vluxseg7ei8_v_u8mf8x7_mu(vbool64_t 
vm, vuint8mf8x7_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_u8mf8x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x7_t test_vluxseg7ei8_v_u8mf4x7_mu(vbool32_t vm, vuint8mf4x7_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_u8mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x7_t test_vluxseg7ei8_v_u8mf2x7_mu(vbool16_t vm, vuint8mf2x7_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_u8mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x7_t test_vluxseg7ei8_v_u8m1x7_mu(vbool8_t vm, vuint8m1x7_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg7ei8_v_u8m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x7_t test_vluxseg7ei8_v_u16mf4x7_mu(vbool64_t vm, vuint16mf4x7_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u16mf4x7_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x7_t test_vluxseg7ei8_v_u16mf2x7_mu(vbool32_t vm, vuint16mf2x7_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u16mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x7_t test_vluxseg7ei8_v_u16m1x7_mu(vbool16_t vm, vuint16m1x7_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u16m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x7_t test_vluxseg7ei8_v_u32mf2x7_mu(vbool64_t vm, vuint32mf2x7_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u32mf2x7_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x7_t test_vluxseg7ei8_v_u32m1x7_mu(vbool32_t vm, vuint32m1x7_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u32m1x7_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x7_t test_vluxseg7ei8_v_u64m1x7_mu(vbool64_t vm, vuint64m1x7_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg7ei8_v_u64m1x7_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei16.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei16.c index 8fd8b0c1c..3d0951456 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei16.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh 
-disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tu(vfloat16mf4x8_t vd, +                                                const _Float16 *rs1, +                                                vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tu(vfloat16mf2x8_t vd, +                                                const _Float16 *rs1, +                                                vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tu(vfloat16m1x8_t vd, +                                              const _Float16 *rs1, +                                              vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tu(vfloat32mf2x8_t vd, +                                                const float *rs1, +                                                vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tu(vfloat32m1x8_t vd, +                                              const float *rs1, +                                              vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tu(vfloat64m1x8_t vd, +                                              const double *rs1, +                                              vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, +                                            vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, +                                            vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, +                                            vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, +                                          vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tu(vint16mf4x8_t vd, +                                              const int16_t *rs1, +                                              vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t
vl) { +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tu(vint16mf2x8_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tu(vint32mf2x8_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tu(vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tu(vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tu(vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tu(vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tu(vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tu(vuint16m1x8_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { 
+vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tu(vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tu(vuint32m1x8_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tu(vuint64m1x8_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tum(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tum(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tum(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t 
*rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei16_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei16_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i64m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t 
test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei16_v_u8m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_tumu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_tumu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_tumu(vbool64_t 
vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei16_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, 
vuint16mf2_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, + vuint16m2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_tumu(vbool64_t vm, + vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_tumu(vbool32_t vm, + vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_tumu(vbool64_t vm, + vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return 
__riscv_vluxseg8ei16_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei16_v_f16mf4x8_mu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei16_v_f16mf2x8_mu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint16m1_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei16_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei16_v_f32mf2x8_mu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint16mf2_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei16_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint16mf4_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei16_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei16_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint16mf4_t rs2, + size_t vl) { return __riscv_vluxseg8ei16_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei16_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint16mf2_t rs2, + size_t vl) { return __riscv_vluxseg8ei16_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint16m1_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei16_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei16_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint16m2_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei16_v_i8m1x8_mu(vbool8_t vm, 
vint8m1x8_t vd, + const int8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei16_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei16_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei16_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint16m1_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei16_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, vuint16m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei16_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei16_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei16_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei16_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei16_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei16_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei16_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint16m2_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei16_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint16m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei16_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint16mf4x8_t 
test_vluxseg8ei16_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei16_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint16m1_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei16_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint16m1_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei16_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint16mf2_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei16_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint16mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint16mf4_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei16_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint16mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei16_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei32.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei32.c index fa316b6ed..557005a61 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei32.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei32.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tu(vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tu(vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tu(vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) {
+vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tu(vfloat32mf2x8_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tu(vfloat32m1x8_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tu(vfloat64m1x8_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tu(vint16mf4x8_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tu(vint16mf2x8_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tu(vint32mf2x8_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x8_t 
test_vluxseg8ei32_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tu(vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tu(vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tu(vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tu(vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tu(vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tu(vuint16m1x8_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tu(vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tu(vuint32m1x8_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tu(vuint64m1x8_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tum(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t vm, 
vfloat16mf2x8_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tum(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tum(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i16mf2x8_tum(vm, vd, 
rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i64m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_u8m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + 
vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_tumu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_tumu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_tumu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, 
const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } 
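// ----------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the auto-generated suite): the
// `_tumu` variants exercised above select the tail-undisturbed,
// mask-undisturbed policy, so tail lanes and lanes with a clear mask bit keep
// the contents of the destination operand `vd`. Below is a minimal, hedged
// example of calling one of these intrinsics outside the test harness;
// `gather_records`, `base`, `offs`, and `mask` are hypothetical names, and the
// snippet assumes a vector-capable toolchain such as the one configured by the
// RUN lines (e.g. clang with -march=rv64gcv).

#include <riscv_vector.h>

// Gather up to `n` eight-field float records located at byte offsets `offs`
// from `base`, leaving masked-off and tail lanes as they were in `dest`.
static vfloat32m1x8_t gather_records(vbool32_t mask, vfloat32m1x8_t dest,
                                     const float *base, vuint32m1_t offs,
                                     size_t n) {
  // Pick a legal vector length for up to n 32-bit elements at LMUL=1.
  size_t vl = __riscv_vsetvl_e32m1(n);
  // Unordered indexed segment load with the tumu policy: the same call shape
  // as the tests in this file, minus the lit/FileCheck scaffolding.
  return __riscv_vluxseg8ei32_v_f32m1x8_tumu(mask, dest, base, offs, vl);
}
// ----------------------------------------------------------------------------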
-vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, + vuint32m4_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_tumu(vbool64_t vm, + vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_tumu(vbool32_t vm, + vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_tumu(vbool64_t vm, + vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei32_v_f16mf4x8_mu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint32m1_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei32_v_f16mf2x8_mu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint32m2_t rs2, size_t vl) { +vfloat16m1x8_t 
test_vluxseg8ei32_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei32_v_f32mf2x8_mu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint32m1_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei32_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint32mf2_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei32_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei32_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint32mf2_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint32m1_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei32_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint32m2_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei32_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint32m4_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei32_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei32_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint32m1_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei32_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint32m2_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei32_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, vuint32m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint32mf2_t rs2, 
size_t vl) { +vint32mf2x8_t test_vluxseg8ei32_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint32m1_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei32_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, vuint32m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei32_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei32_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei32_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei32_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint32m4_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei32_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint32m4_t rs2, + size_t vl) { return __riscv_vluxseg8ei32_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei32_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei32_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint32m2_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei32_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint32m2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei32_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t vm, 
vuint32m1x8_t vd, const uint32_t *rs1, vuint32m1_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei32_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint32m1_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint32mf2_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei32_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint32mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei32_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei64.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei64.c index 43af6fe46..e11b05312 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei64.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei64.c @@ -1,423 +1,635 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tu(vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tu(vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tu(vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tu(vfloat32mf2x8_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tu(vfloat32m1x8_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tu(vfloat64m1x8_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, + vuint64m2_t
rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tu(vint16mf4x8_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tu(vint16mf2x8_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tu(vint32mf2x8_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tu(vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tu(vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tu(vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8m1x8_tu(vd, rs1, 
rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tu(vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tu(vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tu(vuint16m1x8_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tu(vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tu(vuint32m1x8_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tu(vuint64m1x8_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tum(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tum(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tum(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t 
test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i64m1x8_tum(vm, vd, 
rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_u8m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_tumu(vbool64_t vm, + vfloat16mf4x8_t vd, 
+ const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_tumu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_tumu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, 
vuint64m2_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, + vuint64m8_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_tumu(vbool64_t vm, + vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_tumu(vbool32_t vm, + vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u16mf2x8_tumu(vm, vd, rs1, rs2, 
vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_tumu(vbool64_t vm, + vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint64m1_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei64_v_f16mf4x8_mu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint64m2_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei64_v_f16mf2x8_mu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint64m4_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei64_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint64m1_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei64_v_f32mf2x8_mu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint64m2_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei64_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint64m1_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei64_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint64m1_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei64_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t 
vd, + const int8_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint64m2_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei64_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint64m4_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei64_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint64m8_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei64_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint64m1_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei64_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint64m2_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei64_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint64m4_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei64_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, vuint64m4_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint64m1_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei64_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint64m2_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei64_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, vuint64m2_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint64m1_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei64_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, vuint64m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei64_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei64_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t 
vd, + const uint8_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei64_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint64m8_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei64_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint64m8_t rs2, + size_t vl) { return __riscv_vluxseg8ei64_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei64_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei64_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint64m4_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei64_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint64m4_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u16m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei64_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint64m2_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei64_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint64m2_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint64m1_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei64_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint64m1_t rs2, size_t vl) { return __riscv_vluxseg8ei64_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei8.c b/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei8.c index 3afc6a59a..10e67ccd1 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vluxseg8ei8.c @@ -1,423 +1,629 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4x8_t
test_vluxseg8ei8_v_f16mf4x8_tu(vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f16mf4x8_tu(vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tu(vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f16mf2x8_tu(vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tu(vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f16m1x8_tu(vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tu(vfloat32mf2x8_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f32mf2x8_tu(vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tu(vfloat32m1x8_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_f32m1x8_tu(vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tu(vfloat64m1x8_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_f64m1x8_tu(vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tu(vint8mf8x8_t vd, const int8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i8mf8x8_tu(vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tu(vint8mf4x8_t vd, const int8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i8mf4x8_tu(vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tu(vint8mf2x8_t vd, const int8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i8mf2x8_tu(vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tu(vint8m1x8_t vd, const int8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i8m1x8_tu(vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tu(vint16mf4x8_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i16mf4x8_tu(vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tu(vint16mf2x8_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i16mf2x8_tu(vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tu(vint16m1x8_t vd, const int16_t *rs1, 
+ vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i16m1x8_tu(vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tu(vint32mf2x8_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i32mf2x8_tu(vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tu(vint32m1x8_t vd, const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i32m1x8_tu(vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tu(vint64m1x8_t vd, const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i64m1x8_tu(vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tu(vuint8mf8x8_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_u8mf8x8_tu(vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tu(vuint8mf4x8_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_u8mf4x8_tu(vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tu(vuint8mf2x8_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_u8mf2x8_tu(vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tu(vuint8m1x8_t vd, const uint8_t *rs1, + vuint8m1_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u8m1x8_tu(vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tu(vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u16mf4x8_tu(vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tu(vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u16mf2x8_tu(vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tu(vuint16m1x8_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u16m1x8_tu(vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tu(vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u32mf2x8_tu(vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tu(vuint32m1x8_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return 
__riscv_vluxseg8ei8_v_u32m1x8_tu(vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tu(vuint64m1x8_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u64m1x8_tu(vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tum(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tum(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tum(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f16m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tum(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tum(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_f32m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tum(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f64m1x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tum(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tum(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tum(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tum(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { 
return __riscv_vluxseg8ei8_v_i8m1x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tum(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tum(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tum(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i16m1x8_tum(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tum(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tum(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i32m1x8_tum(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tum(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i64m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tum(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u8mf8x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tum(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u8mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tum(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u8mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tum(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_u8m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tum(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + 
vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u16mf4x8_tum(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tum(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u16mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tum(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u16m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tum(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u32mf2x8_tum(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tum(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u32m1x8_tum(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tum(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u64m1x8_tum(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_tumu(vbool64_t vm, + vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_tumu(vbool32_t vm, + vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_tumu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_tumu(vbool64_t vm, + vfloat32mf2x8_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_tumu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint8mf8_t 
rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_tumu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_tumu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_tumu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_tumu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_tumu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_tumu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_tumu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_tumu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_tumu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_tumu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_tumu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, 
const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_tumu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u8mf8x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_tumu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u8mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_tumu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u8mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_tumu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_u8m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_tumu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u16mf4x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_tumu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u16mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_tumu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u16m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_tumu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u32mf2x8_tumu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_tumu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u32m1x8_tumu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_tumu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u64m1x8_tumu(vm, vd, rs1, rs2, vl); } -vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, const _Float16 *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat16mf4x8_t test_vluxseg8ei8_v_f16mf4x8_mu(vbool64_t vm, vfloat16mf4x8_t vd, + const _Float16 *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f16mf4x8_mu(vm, vd, 
rs1, rs2, vl); } -vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, const _Float16 *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat16mf2x8_t test_vluxseg8ei8_v_f16mf2x8_mu(vbool32_t vm, vfloat16mf2x8_t vd, + const _Float16 *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, const _Float16 *rs1, vuint8mf2_t rs2, size_t vl) { +vfloat16m1x8_t test_vluxseg8ei8_v_f16m1x8_mu(vbool16_t vm, vfloat16m1x8_t vd, + const _Float16 *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f16m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, const float *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat32mf2x8_t test_vluxseg8ei8_v_f32mf2x8_mu(vbool64_t vm, vfloat32mf2x8_t vd, + const float *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_f32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, const float *rs1, vuint8mf4_t rs2, size_t vl) { +vfloat32m1x8_t test_vluxseg8ei8_v_f32m1x8_mu(vbool32_t vm, vfloat32m1x8_t vd, + const float *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_f32m1x8_mu(vm, vd, rs1, rs2, vl); } -vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, const double *rs1, vuint8mf8_t rs2, size_t vl) { +vfloat64m1x8_t test_vluxseg8ei8_v_f64m1x8_mu(vbool64_t vm, vfloat64m1x8_t vd, + const double *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_f64m1x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, const int8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint8mf8x8_t test_vluxseg8ei8_v_i8mf8x8_mu(vbool64_t vm, vint8mf8x8_t vd, + const int8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, const int8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint8mf4x8_t test_vluxseg8ei8_v_i8mf4x8_mu(vbool32_t vm, vint8mf4x8_t vd, + const int8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, const int8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint8mf2x8_t test_vluxseg8ei8_v_i8mf2x8_mu(vbool16_t vm, vint8mf2x8_t vd, + const int8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, const int8_t *rs1, vuint8m1_t rs2, size_t vl) { +vint8m1x8_t test_vluxseg8ei8_v_i8m1x8_mu(vbool8_t vm, vint8m1x8_t vd, + const int8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i8m1x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, const int16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint16mf4x8_t test_vluxseg8ei8_v_i16mf4x8_mu(vbool64_t vm, vint16mf4x8_t vd, + const int16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, const int16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint16mf2x8_t test_vluxseg8ei8_v_i16mf2x8_mu(vbool32_t vm, vint16mf2x8_t vd, + const int16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i16mf2x8_mu(vm, vd, 
rs1, rs2, vl); } -vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, const int16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vint16m1x8_t test_vluxseg8ei8_v_i16m1x8_mu(vbool16_t vm, vint16m1x8_t vd, + const int16_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i16m1x8_mu(vm, vd, rs1, rs2, vl); } -vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, const int32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint32mf2x8_t test_vluxseg8ei8_v_i32mf2x8_mu(vbool64_t vm, vint32mf2x8_t vd, + const int32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_i32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, const int32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vint32m1x8_t test_vluxseg8ei8_v_i32m1x8_mu(vbool32_t vm, vint32m1x8_t vd, + const int32_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i32m1x8_mu(vm, vd, rs1, rs2, vl); } -vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, const int64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vint64m1x8_t test_vluxseg8ei8_v_i64m1x8_mu(vbool64_t vm, vint64m1x8_t vd, + const int64_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_i64m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, const uint8_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint8mf8x8_t test_vluxseg8ei8_v_u8mf8x8_mu(vbool64_t vm, vuint8mf8x8_t vd, + const uint8_t *rs1, vuint8mf8_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_u8mf8x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, const uint8_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint8mf4x8_t test_vluxseg8ei8_v_u8mf4x8_mu(vbool32_t vm, vuint8mf4x8_t vd, + const uint8_t *rs1, vuint8mf4_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_u8mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, const uint8_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint8mf2x8_t test_vluxseg8ei8_v_u8mf2x8_mu(vbool16_t vm, vuint8mf2x8_t vd, + const uint8_t *rs1, vuint8mf2_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_u8mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, const uint8_t *rs1, vuint8m1_t rs2, size_t vl) { +vuint8m1x8_t test_vluxseg8ei8_v_u8m1x8_mu(vbool8_t vm, vuint8m1x8_t vd, + const uint8_t *rs1, vuint8m1_t rs2, + size_t vl) { return __riscv_vluxseg8ei8_v_u8m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, const uint16_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint16mf4x8_t test_vluxseg8ei8_v_u16mf4x8_mu(vbool64_t vm, vuint16mf4x8_t vd, + const uint16_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u16mf4x8_mu(vm, vd, rs1, rs2, vl); } -vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, const uint16_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint16mf2x8_t test_vluxseg8ei8_v_u16mf2x8_mu(vbool32_t vm, vuint16mf2x8_t vd, + const uint16_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u16mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, const uint16_t *rs1, vuint8mf2_t rs2, size_t vl) { +vuint16m1x8_t test_vluxseg8ei8_v_u16m1x8_mu(vbool16_t vm, vuint16m1x8_t vd, + const uint16_t *rs1, + vuint8mf2_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u16m1x8_mu(vm, vd, 
rs1, rs2, vl); } -vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, const uint32_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint32mf2x8_t test_vluxseg8ei8_v_u32mf2x8_mu(vbool64_t vm, vuint32mf2x8_t vd, + const uint32_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u32mf2x8_mu(vm, vd, rs1, rs2, vl); } -vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, const uint32_t *rs1, vuint8mf4_t rs2, size_t vl) { +vuint32m1x8_t test_vluxseg8ei8_v_u32m1x8_mu(vbool32_t vm, vuint32m1x8_t vd, + const uint32_t *rs1, + vuint8mf4_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u32m1x8_mu(vm, vd, rs1, rs2, vl); } -vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, const uint64_t *rs1, vuint8mf8_t rs2, size_t vl) { +vuint64m1x8_t test_vluxseg8ei8_v_u64m1x8_mu(vbool64_t vm, vuint64m1x8_t vd, + const uint64_t *rs1, + vuint8mf8_t rs2, size_t vl) { return __riscv_vluxseg8ei8_v_u64m1x8_mu(vm, vd, rs1, rs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmacc.c b/auto-generated/policy_funcs/llvm-api-tests/vmacc.c index 213a706e7..48df2176d 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmacc.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmacc.c @@ -1,1415 +1,1833 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8_t test_vmacc_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vmacc_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vmacc_vv_i8mf8_tu(vd, vs1, vs2, vl); } -vint8mf8_t test_vmacc_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vmacc_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vmacc_vx_i8mf8_tu(vd, rs1, vs2, vl); } -vint8mf4_t test_vmacc_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vmacc_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, + size_t vl) { return __riscv_vmacc_vv_i8mf4_tu(vd, vs1, vs2, vl); } -vint8mf4_t test_vmacc_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vmacc_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, + size_t vl) { return __riscv_vmacc_vx_i8mf4_tu(vd, rs1, vs2, vl); } -vint8mf2_t test_vmacc_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vmacc_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, + size_t vl) { return __riscv_vmacc_vv_i8mf2_tu(vd, vs1, vs2, vl); } -vint8mf2_t test_vmacc_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vmacc_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, + size_t vl) { return __riscv_vmacc_vx_i8mf2_tu(vd, rs1, vs2, vl); } -vint8m1_t test_vmacc_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vmacc_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, + size_t vl) { return __riscv_vmacc_vv_i8m1_tu(vd, vs1, vs2, vl); } -vint8m1_t test_vmacc_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vmacc_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, + size_t vl) { return __riscv_vmacc_vx_i8m1_tu(vd, rs1, vs2, vl); }
-vint8m2_t test_vmacc_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vmacc_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, + size_t vl) { return __riscv_vmacc_vv_i8m2_tu(vd, vs1, vs2, vl); } -vint8m2_t test_vmacc_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vmacc_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, + size_t vl) { return __riscv_vmacc_vx_i8m2_tu(vd, rs1, vs2, vl); } -vint8m4_t test_vmacc_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vmacc_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, + size_t vl) { return __riscv_vmacc_vv_i8m4_tu(vd, vs1, vs2, vl); } -vint8m4_t test_vmacc_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vmacc_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, + size_t vl) { return __riscv_vmacc_vx_i8m4_tu(vd, rs1, vs2, vl); } -vint8m8_t test_vmacc_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vmacc_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, + size_t vl) { return __riscv_vmacc_vv_i8m8_tu(vd, vs1, vs2, vl); } -vint8m8_t test_vmacc_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vmacc_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, + size_t vl) { return __riscv_vmacc_vx_i8m8_tu(vd, rs1, vs2, vl); } -vint16mf4_t test_vmacc_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vmacc_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vmacc_vv_i16mf4_tu(vd, vs1, vs2, vl); } -vint16mf4_t test_vmacc_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vmacc_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vmacc_vx_i16mf4_tu(vd, rs1, vs2, vl); } -vint16mf2_t test_vmacc_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vmacc_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vmacc_vv_i16mf2_tu(vd, vs1, vs2, vl); } -vint16mf2_t test_vmacc_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vmacc_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vmacc_vx_i16mf2_tu(vd, rs1, vs2, vl); } -vint16m1_t test_vmacc_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vmacc_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, + size_t vl) { return __riscv_vmacc_vv_i16m1_tu(vd, vs1, vs2, vl); } -vint16m1_t test_vmacc_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vmacc_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, + size_t vl) { return __riscv_vmacc_vx_i16m1_tu(vd, rs1, vs2, vl); } -vint16m2_t test_vmacc_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vmacc_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, + size_t vl) { return __riscv_vmacc_vv_i16m2_tu(vd, vs1, vs2, vl); } -vint16m2_t test_vmacc_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vmacc_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, + size_t vl) { return __riscv_vmacc_vx_i16m2_tu(vd, rs1, vs2, vl); } -vint16m4_t test_vmacc_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vmacc_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, + 
size_t vl) { return __riscv_vmacc_vv_i16m4_tu(vd, vs1, vs2, vl); } -vint16m4_t test_vmacc_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vmacc_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, + size_t vl) { return __riscv_vmacc_vx_i16m4_tu(vd, rs1, vs2, vl); } -vint16m8_t test_vmacc_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vmacc_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, + size_t vl) { return __riscv_vmacc_vv_i16m8_tu(vd, vs1, vs2, vl); } -vint16m8_t test_vmacc_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vmacc_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, + size_t vl) { return __riscv_vmacc_vx_i16m8_tu(vd, rs1, vs2, vl); } -vint32mf2_t test_vmacc_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vmacc_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vmacc_vv_i32mf2_tu(vd, vs1, vs2, vl); } -vint32mf2_t test_vmacc_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vmacc_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vmacc_vx_i32mf2_tu(vd, rs1, vs2, vl); } -vint32m1_t test_vmacc_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vmacc_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, + size_t vl) { return __riscv_vmacc_vv_i32m1_tu(vd, vs1, vs2, vl); } -vint32m1_t test_vmacc_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vmacc_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, + size_t vl) { return __riscv_vmacc_vx_i32m1_tu(vd, rs1, vs2, vl); } -vint32m2_t test_vmacc_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vmacc_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, + size_t vl) { return __riscv_vmacc_vv_i32m2_tu(vd, vs1, vs2, vl); } -vint32m2_t test_vmacc_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vmacc_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, + size_t vl) { return __riscv_vmacc_vx_i32m2_tu(vd, rs1, vs2, vl); } -vint32m4_t test_vmacc_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vmacc_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, + size_t vl) { return __riscv_vmacc_vv_i32m4_tu(vd, vs1, vs2, vl); } -vint32m4_t test_vmacc_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vmacc_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, + size_t vl) { return __riscv_vmacc_vx_i32m4_tu(vd, rs1, vs2, vl); } -vint32m8_t test_vmacc_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vmacc_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, + size_t vl) { return __riscv_vmacc_vv_i32m8_tu(vd, vs1, vs2, vl); } -vint32m8_t test_vmacc_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vmacc_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, + size_t vl) { return __riscv_vmacc_vx_i32m8_tu(vd, rs1, vs2, vl); } -vint64m1_t test_vmacc_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vmacc_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, + size_t vl) { return __riscv_vmacc_vv_i64m1_tu(vd, vs1, vs2, vl); } -vint64m1_t test_vmacc_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, 
+vint64m1_t test_vmacc_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2,
+                                  size_t vl) {
   return __riscv_vmacc_vx_i64m1_tu(vd, rs1, vs2, vl);
 }
-vint64m2_t test_vmacc_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
+vint64m2_t test_vmacc_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2,
+                                  size_t vl) {
   return __riscv_vmacc_vv_i64m2_tu(vd, vs1, vs2, vl);
 }
-vint64m2_t test_vmacc_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
+vint64m2_t test_vmacc_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2,
+                                  size_t vl) {
   return __riscv_vmacc_vx_i64m2_tu(vd, rs1, vs2, vl);
 }
-vint64m4_t test_vmacc_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
+vint64m4_t test_vmacc_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2,
+                                  size_t vl) {
   return __riscv_vmacc_vv_i64m4_tu(vd, vs1, vs2, vl);
 }
-vint64m4_t test_vmacc_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
+vint64m4_t test_vmacc_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2,
+                                  size_t vl) {
   return __riscv_vmacc_vx_i64m4_tu(vd, rs1, vs2, vl);
 }
-vint64m8_t test_vmacc_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
+vint64m8_t test_vmacc_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2,
+                                  size_t vl) {
   return __riscv_vmacc_vv_i64m8_tu(vd, vs1, vs2, vl);
 }
-vint64m8_t test_vmacc_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
+vint64m8_t test_vmacc_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2,
+                                  size_t vl) {
   return __riscv_vmacc_vx_i64m8_tu(vd, rs1, vs2, vl);
 }
-vuint8mf8_t test_vmacc_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+vuint8mf8_t test_vmacc_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1,
+                                   vuint8mf8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u8mf8_tu(vd, vs1, vs2, vl);
 }
-vuint8mf8_t test_vmacc_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
+vuint8mf8_t test_vmacc_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2,
+                                   size_t vl) {
   return __riscv_vmacc_vx_u8mf8_tu(vd, rs1, vs2, vl);
 }
-vuint8mf4_t test_vmacc_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+vuint8mf4_t test_vmacc_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1,
+                                   vuint8mf4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u8mf4_tu(vd, vs1, vs2, vl);
 }
-vuint8mf4_t test_vmacc_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+vuint8mf4_t test_vmacc_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2,
+                                   size_t vl) {
   return __riscv_vmacc_vx_u8mf4_tu(vd, rs1, vs2, vl);
 }
-vuint8mf2_t test_vmacc_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+vuint8mf2_t test_vmacc_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1,
+                                   vuint8mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u8mf2_tu(vd, vs1, vs2, vl);
 }
-vuint8mf2_t test_vmacc_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+vuint8mf2_t test_vmacc_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2,
+                                   size_t vl) {
   return __riscv_vmacc_vx_u8mf2_tu(vd, rs1, vs2, vl);
 }
-vuint8m1_t test_vmacc_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+vuint8m1_t test_vmacc_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2,
+                                 size_t vl) {
   return __riscv_vmacc_vv_u8m1_tu(vd, vs1, vs2, vl);
 }
-vuint8m1_t test_vmacc_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+vuint8m1_t test_vmacc_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2,
+                                 size_t vl) {
   return __riscv_vmacc_vx_u8m1_tu(vd, rs1, vs2, vl);
 }
-vuint8m2_t test_vmacc_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+vuint8m2_t test_vmacc_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2,
+                                 size_t vl) {
   return __riscv_vmacc_vv_u8m2_tu(vd, vs1, vs2, vl);
 }
-vuint8m2_t test_vmacc_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+vuint8m2_t test_vmacc_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2,
+                                 size_t vl) {
   return __riscv_vmacc_vx_u8m2_tu(vd, rs1, vs2, vl);
 }
-vuint8m4_t test_vmacc_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
+vuint8m4_t test_vmacc_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2,
+                                 size_t vl) {
   return __riscv_vmacc_vv_u8m4_tu(vd, vs1, vs2, vl);
 }
-vuint8m4_t test_vmacc_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
+vuint8m4_t test_vmacc_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2,
+                                 size_t vl) {
   return __riscv_vmacc_vx_u8m4_tu(vd, rs1, vs2, vl);
 }
-vuint8m8_t test_vmacc_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
+vuint8m8_t test_vmacc_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2,
+                                 size_t vl) {
   return __riscv_vmacc_vv_u8m8_tu(vd, vs1, vs2, vl);
 }
-vuint8m8_t test_vmacc_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
+vuint8m8_t test_vmacc_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2,
+                                 size_t vl) {
   return __riscv_vmacc_vx_u8m8_tu(vd, rs1, vs2, vl);
 }
-vuint16mf4_t test_vmacc_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
+vuint16mf4_t test_vmacc_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1,
+                                     vuint16mf4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u16mf4_tu(vd, vs1, vs2, vl);
 }
-vuint16mf4_t test_vmacc_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+vuint16mf4_t test_vmacc_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1,
+                                     vuint16mf4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16mf4_tu(vd, rs1, vs2, vl);
 }
-vuint16mf2_t test_vmacc_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+vuint16mf2_t test_vmacc_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1,
+                                     vuint16mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u16mf2_tu(vd, vs1, vs2, vl);
 }
-vuint16mf2_t test_vmacc_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+vuint16mf2_t test_vmacc_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1,
+                                     vuint16mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16mf2_tu(vd, rs1, vs2, vl);
 }
-vuint16m1_t test_vmacc_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+vuint16m1_t test_vmacc_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1,
+                                   vuint16m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u16m1_tu(vd, vs1, vs2, vl);
 }
-vuint16m1_t test_vmacc_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+vuint16m1_t test_vmacc_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1,
+                                   vuint16m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16m1_tu(vd, rs1, vs2, vl);
 }
-vuint16m2_t test_vmacc_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+vuint16m2_t test_vmacc_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1,
+                                   vuint16m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u16m2_tu(vd, vs1, vs2, vl);
 }
-vuint16m2_t test_vmacc_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+vuint16m2_t test_vmacc_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1,
+                                   vuint16m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16m2_tu(vd, rs1, vs2, vl);
 }
-vuint16m4_t test_vmacc_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+vuint16m4_t test_vmacc_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1,
+                                   vuint16m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u16m4_tu(vd, vs1, vs2, vl);
 }
-vuint16m4_t test_vmacc_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
+vuint16m4_t test_vmacc_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1,
+                                   vuint16m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16m4_tu(vd, rs1, vs2, vl);
 }
-vuint16m8_t test_vmacc_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
+vuint16m8_t test_vmacc_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1,
+                                   vuint16m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u16m8_tu(vd, vs1, vs2, vl);
 }
-vuint16m8_t test_vmacc_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
+vuint16m8_t test_vmacc_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1,
+                                   vuint16m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16m8_tu(vd, rs1, vs2, vl);
 }
-vuint32mf2_t test_vmacc_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
+vuint32mf2_t test_vmacc_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1,
+                                     vuint32mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u32mf2_tu(vd, vs1, vs2, vl);
 }
-vuint32mf2_t test_vmacc_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
+vuint32mf2_t test_vmacc_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1,
+                                     vuint32mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32mf2_tu(vd, rs1, vs2, vl);
 }
-vuint32m1_t test_vmacc_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
+vuint32m1_t test_vmacc_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1,
+                                   vuint32m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u32m1_tu(vd, vs1, vs2, vl);
 }
-vuint32m1_t test_vmacc_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
+vuint32m1_t test_vmacc_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1,
+                                   vuint32m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32m1_tu(vd, rs1, vs2, vl);
 }
-vuint32m2_t test_vmacc_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
+vuint32m2_t test_vmacc_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1,
+                                   vuint32m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u32m2_tu(vd, vs1, vs2, vl);
 }
-vuint32m2_t test_vmacc_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
+vuint32m2_t test_vmacc_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1,
+                                   vuint32m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32m2_tu(vd, rs1, vs2, vl);
 }
-vuint32m4_t test_vmacc_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+vuint32m4_t test_vmacc_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1,
+                                   vuint32m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u32m4_tu(vd, vs1, vs2, vl);
 }
-vuint32m4_t test_vmacc_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+vuint32m4_t test_vmacc_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1,
+                                   vuint32m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32m4_tu(vd, rs1, vs2, vl);
 }
-vuint32m8_t test_vmacc_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
+vuint32m8_t test_vmacc_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1,
+                                   vuint32m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u32m8_tu(vd, vs1, vs2, vl);
 }
-vuint32m8_t test_vmacc_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
+vuint32m8_t test_vmacc_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1,
+                                   vuint32m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32m8_tu(vd, rs1, vs2, vl);
 }
-vuint64m1_t test_vmacc_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
+vuint64m1_t test_vmacc_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1,
+                                   vuint64m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u64m1_tu(vd, vs1, vs2, vl);
 }
-vuint64m1_t test_vmacc_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
+vuint64m1_t test_vmacc_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1,
+                                   vuint64m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u64m1_tu(vd, rs1, vs2, vl);
 }
-vuint64m2_t test_vmacc_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
+vuint64m2_t test_vmacc_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1,
+                                   vuint64m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u64m2_tu(vd, vs1, vs2, vl);
 }
-vuint64m2_t test_vmacc_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+vuint64m2_t test_vmacc_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1,
+                                   vuint64m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u64m2_tu(vd, rs1, vs2, vl);
 }
-vuint64m4_t test_vmacc_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+vuint64m4_t test_vmacc_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1,
+                                   vuint64m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u64m4_tu(vd, vs1, vs2, vl);
 }
-vuint64m4_t test_vmacc_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+vuint64m4_t test_vmacc_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1,
+                                   vuint64m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u64m4_tu(vd, rs1, vs2, vl);
 }
-vuint64m8_t test_vmacc_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+vuint64m8_t test_vmacc_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1,
+                                   vuint64m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u64m8_tu(vd, vs1, vs2, vl);
 }
-vuint64m8_t test_vmacc_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+vuint64m8_t test_vmacc_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1,
+                                   vuint64m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u64m8_tu(vd, rs1, vs2, vl);
 }
-vint8mf8_t test_vmacc_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+vint8mf8_t test_vmacc_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1,
+                                   vint8mf8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8mf8_tum(vm, vd, vs1, vs2, vl);
 }
-vint8mf8_t test_vmacc_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+vint8mf8_t test_vmacc_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1,
+                                   vint8mf8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8mf8_tum(vm, vd, rs1, vs2, vl);
 }
-vint8mf4_t test_vmacc_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+vint8mf4_t test_vmacc_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1,
+                                   vint8mf4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8mf4_tum(vm, vd, vs1, vs2, vl);
 }
-vint8mf4_t test_vmacc_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+vint8mf4_t test_vmacc_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1,
+                                   vint8mf4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vint8mf2_t test_vmacc_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+vint8mf2_t test_vmacc_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1,
+                                   vint8mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vint8mf2_t test_vmacc_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+vint8mf2_t test_vmacc_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1,
+                                   vint8mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vint8m1_t test_vmacc_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+vint8m1_t test_vmacc_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1,
+                                 vint8m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8m1_tum(vm, vd, vs1, vs2, vl);
 }
-vint8m1_t test_vmacc_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+vint8m1_t test_vmacc_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1,
+                                 vint8m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8m1_tum(vm, vd, rs1, vs2, vl);
 }
-vint8m2_t test_vmacc_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+vint8m2_t test_vmacc_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1,
+                                 vint8m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8m2_tum(vm, vd, vs1, vs2, vl);
 }
-vint8m2_t test_vmacc_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+vint8m2_t test_vmacc_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1,
+                                 vint8m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8m2_tum(vm, vd, rs1, vs2, vl);
 }
-vint8m4_t test_vmacc_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+vint8m4_t test_vmacc_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1,
+                                 vint8m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8m4_tum(vm, vd, vs1, vs2, vl);
 }
-vint8m4_t test_vmacc_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
+vint8m4_t test_vmacc_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1,
+                                 vint8m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8m4_tum(vm, vd, rs1, vs2, vl);
 }
-vint8m8_t test_vmacc_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
+vint8m8_t test_vmacc_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1,
+                                 vint8m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8m8_tum(vm, vd, vs1, vs2, vl);
 }
-vint8m8_t test_vmacc_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
+vint8m8_t test_vmacc_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1,
+                                 vint8m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8m8_tum(vm, vd, rs1, vs2, vl);
 }
-vint16mf4_t test_vmacc_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
+vint16mf4_t test_vmacc_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd,
+                                     vint16mf4_t vs1, vint16mf4_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_i16mf4_tum(vm, vd, vs1, vs2, vl);
 }
-vint16mf4_t test_vmacc_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
+vint16mf4_t test_vmacc_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1,
+                                     vint16mf4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i16mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vint16mf2_t test_vmacc_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
+vint16mf2_t test_vmacc_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd,
+                                     vint16mf2_t vs1, vint16mf2_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_i16mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vint16mf2_t test_vmacc_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
+vint16mf2_t test_vmacc_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1,
+                                     vint16mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i16mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vint16m1_t test_vmacc_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+vint16m1_t test_vmacc_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1,
+                                   vint16m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i16m1_tum(vm, vd, vs1, vs2, vl);
 }
-vint16m1_t test_vmacc_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
+vint16m1_t test_vmacc_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1,
+                                   vint16m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i16m1_tum(vm, vd, rs1, vs2, vl);
 }
-vint16m2_t test_vmacc_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
+vint16m2_t test_vmacc_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1,
+                                   vint16m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i16m2_tum(vm, vd, vs1, vs2, vl);
 }
-vint16m2_t test_vmacc_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
+vint16m2_t test_vmacc_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1,
+                                   vint16m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i16m2_tum(vm, vd, rs1, vs2, vl);
 }
-vint16m4_t test_vmacc_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
+vint16m4_t test_vmacc_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1,
+                                   vint16m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i16m4_tum(vm, vd, vs1, vs2, vl);
 }
-vint16m4_t test_vmacc_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
+vint16m4_t test_vmacc_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1,
+                                   vint16m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i16m4_tum(vm, vd, rs1, vs2, vl);
 }
-vint16m8_t test_vmacc_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
+vint16m8_t test_vmacc_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1,
+                                   vint16m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i16m8_tum(vm, vd, vs1, vs2, vl);
 }
-vint16m8_t test_vmacc_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
+vint16m8_t test_vmacc_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1,
+                                   vint16m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i16m8_tum(vm, vd, rs1, vs2, vl);
 }
-vint32mf2_t test_vmacc_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
+vint32mf2_t test_vmacc_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                     vint32mf2_t vs1, vint32mf2_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_i32mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vint32mf2_t test_vmacc_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
+vint32mf2_t test_vmacc_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1,
+                                     vint32mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i32mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vint32m1_t test_vmacc_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+vint32m1_t test_vmacc_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1,
+                                   vint32m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i32m1_tum(vm, vd, vs1, vs2, vl);
 }
-vint32m1_t test_vmacc_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+vint32m1_t test_vmacc_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1,
+                                   vint32m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i32m1_tum(vm, vd, rs1, vs2, vl);
 }
-vint32m2_t test_vmacc_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+vint32m2_t test_vmacc_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1,
+                                   vint32m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i32m2_tum(vm, vd, vs1, vs2, vl);
 }
-vint32m2_t test_vmacc_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+vint32m2_t test_vmacc_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1,
+                                   vint32m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i32m2_tum(vm, vd, rs1, vs2, vl);
 }
-vint32m4_t test_vmacc_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+vint32m4_t test_vmacc_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1,
+                                   vint32m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i32m4_tum(vm, vd, vs1, vs2, vl);
 }
-vint32m4_t test_vmacc_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
+vint32m4_t test_vmacc_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1,
+                                   vint32m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i32m4_tum(vm, vd, rs1, vs2, vl);
 }
-vint32m8_t test_vmacc_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
+vint32m8_t test_vmacc_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1,
+                                   vint32m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i32m8_tum(vm, vd, vs1, vs2, vl);
 }
-vint32m8_t test_vmacc_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
+vint32m8_t test_vmacc_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1,
+                                   vint32m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i32m8_tum(vm, vd, rs1, vs2, vl);
 }
-vint64m1_t test_vmacc_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
+vint64m1_t test_vmacc_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1,
+                                   vint64m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i64m1_tum(vm, vd, vs1, vs2, vl);
 }
-vint64m1_t test_vmacc_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
+vint64m1_t test_vmacc_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1,
+                                   vint64m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i64m1_tum(vm, vd, rs1, vs2, vl);
 }
-vint64m2_t test_vmacc_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
+vint64m2_t test_vmacc_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1,
+                                   vint64m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i64m2_tum(vm, vd, vs1, vs2, vl);
 }
-vint64m2_t test_vmacc_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
+vint64m2_t test_vmacc_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1,
+                                   vint64m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i64m2_tum(vm, vd, rs1, vs2, vl);
 }
-vint64m4_t test_vmacc_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
+vint64m4_t test_vmacc_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1,
+                                   vint64m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i64m4_tum(vm, vd, vs1, vs2, vl);
 }
-vint64m4_t test_vmacc_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
+vint64m4_t test_vmacc_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1,
+                                   vint64m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i64m4_tum(vm, vd, rs1, vs2, vl);
 }
-vint64m8_t test_vmacc_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
+vint64m8_t test_vmacc_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1,
+                                   vint64m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i64m8_tum(vm, vd, vs1, vs2, vl);
 }
-vint64m8_t test_vmacc_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
+vint64m8_t test_vmacc_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1,
+                                   vint64m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i64m8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8mf8_t test_vmacc_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+vuint8mf8_t test_vmacc_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf8_t vs1, vuint8mf8_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u8mf8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8mf8_t test_vmacc_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
+vuint8mf8_t test_vmacc_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1,
+                                    vuint8mf8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8mf8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8mf4_t test_vmacc_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+vuint8mf4_t test_vmacc_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf4_t vs1, vuint8mf4_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u8mf4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8mf4_t test_vmacc_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+vuint8mf4_t test_vmacc_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1,
+                                    vuint8mf4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8mf2_t test_vmacc_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+vuint8mf2_t test_vmacc_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                    vuint8mf2_t vs1, vuint8mf2_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u8mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8mf2_t test_vmacc_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+vuint8mf2_t test_vmacc_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1,
+                                    vuint8mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8m1_t test_vmacc_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+vuint8m1_t test_vmacc_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1,
+                                  vuint8m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u8m1_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8m1_t test_vmacc_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+vuint8m1_t test_vmacc_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1,
+                                  vuint8m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8m1_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8m2_t test_vmacc_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+vuint8m2_t test_vmacc_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1,
+                                  vuint8m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u8m2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8m2_t test_vmacc_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+vuint8m2_t test_vmacc_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1,
+                                  vuint8m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8m2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8m4_t test_vmacc_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
+vuint8m4_t test_vmacc_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1,
+                                  vuint8m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u8m4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8m4_t test_vmacc_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
+vuint8m4_t test_vmacc_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1,
+                                  vuint8m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8m4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint8m8_t test_vmacc_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
+vuint8m8_t test_vmacc_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1,
+                                  vuint8m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u8m8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint8m8_t test_vmacc_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
+vuint8m8_t test_vmacc_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1,
+                                  vuint8m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8m8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16mf4_t test_vmacc_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
+vuint16mf4_t test_vmacc_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint16mf4_t vs1, vuint16mf4_t vs2,
+                                      size_t vl) {
   return __riscv_vmacc_vv_u16mf4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16mf4_t test_vmacc_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+vuint16mf4_t test_vmacc_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                      uint16_t rs1, vuint16mf4_t vs2,
+                                      size_t vl) {
   return __riscv_vmacc_vx_u16mf4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16mf2_t test_vmacc_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+vuint16mf2_t test_vmacc_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint16mf2_t vs1, vuint16mf2_t vs2,
+                                      size_t vl) {
   return __riscv_vmacc_vv_u16mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16mf2_t test_vmacc_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+vuint16mf2_t test_vmacc_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                      uint16_t rs1, vuint16mf2_t vs2,
+                                      size_t vl) {
   return __riscv_vmacc_vx_u16mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16m1_t test_vmacc_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+vuint16m1_t test_vmacc_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                    vuint16m1_t vs1, vuint16m1_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u16m1_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16m1_t test_vmacc_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+vuint16m1_t test_vmacc_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1,
+                                    vuint16m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16m1_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16m2_t test_vmacc_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+vuint16m2_t test_vmacc_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                    vuint16m2_t vs1, vuint16m2_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u16m2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16m2_t test_vmacc_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+vuint16m2_t test_vmacc_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1,
+                                    vuint16m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16m2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16m4_t test_vmacc_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+vuint16m4_t test_vmacc_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                    vuint16m4_t vs1, vuint16m4_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u16m4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16m4_t test_vmacc_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
+vuint16m4_t test_vmacc_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1,
+                                    vuint16m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16m4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint16m8_t test_vmacc_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
+vuint16m8_t test_vmacc_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                    vuint16m8_t vs1, vuint16m8_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u16m8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint16m8_t test_vmacc_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
+vuint16m8_t test_vmacc_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1,
+                                    vuint16m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16m8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32mf2_t test_vmacc_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
+vuint32mf2_t test_vmacc_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint32mf2_t vs1, vuint32mf2_t vs2,
+                                      size_t vl) {
   return __riscv_vmacc_vv_u32mf2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32mf2_t test_vmacc_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
+vuint32mf2_t test_vmacc_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                      uint32_t rs1, vuint32mf2_t vs2,
+                                      size_t vl) {
   return __riscv_vmacc_vx_u32mf2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32m1_t test_vmacc_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
+vuint32m1_t test_vmacc_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                    vuint32m1_t vs1, vuint32m1_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u32m1_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32m1_t test_vmacc_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
+vuint32m1_t test_vmacc_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1,
+                                    vuint32m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32m1_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32m2_t test_vmacc_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
+vuint32m2_t test_vmacc_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                    vuint32m2_t vs1, vuint32m2_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u32m2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32m2_t test_vmacc_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
+vuint32m2_t test_vmacc_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1,
+                                    vuint32m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32m2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32m4_t test_vmacc_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+vuint32m4_t test_vmacc_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                    vuint32m4_t vs1, vuint32m4_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u32m4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32m4_t test_vmacc_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+vuint32m4_t test_vmacc_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1,
+                                    vuint32m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32m4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint32m8_t test_vmacc_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
+vuint32m8_t test_vmacc_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                    vuint32m8_t vs1, vuint32m8_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u32m8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint32m8_t test_vmacc_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
+vuint32m8_t test_vmacc_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1,
+                                    vuint32m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32m8_tum(vm, vd, rs1, vs2, vl);
 }
-vuint64m1_t test_vmacc_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
+vuint64m1_t test_vmacc_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                    vuint64m1_t vs1, vuint64m1_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u64m1_tum(vm, vd, vs1, vs2, vl);
 }
-vuint64m1_t test_vmacc_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) {
+vuint64m1_t test_vmacc_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1,
+                                    vuint64m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u64m1_tum(vm, vd, rs1, vs2, vl);
 }
-vuint64m2_t test_vmacc_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) {
+vuint64m2_t test_vmacc_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                    vuint64m2_t vs1, vuint64m2_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u64m2_tum(vm, vd, vs1, vs2, vl);
 }
-vuint64m2_t test_vmacc_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) {
+vuint64m2_t test_vmacc_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1,
+                                    vuint64m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u64m2_tum(vm, vd, rs1, vs2, vl);
 }
-vuint64m4_t test_vmacc_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) {
+vuint64m4_t test_vmacc_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                    vuint64m4_t vs1, vuint64m4_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u64m4_tum(vm, vd, vs1, vs2, vl);
 }
-vuint64m4_t test_vmacc_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) {
+vuint64m4_t test_vmacc_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1,
+                                    vuint64m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u64m4_tum(vm, vd, rs1, vs2, vl);
 }
-vuint64m8_t test_vmacc_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) {
+vuint64m8_t test_vmacc_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd,
+                                    vuint64m8_t vs1, vuint64m8_t vs2,
+                                    size_t vl) {
   return __riscv_vmacc_vv_u64m8_tum(vm, vd, vs1, vs2, vl);
 }
-vuint64m8_t test_vmacc_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) {
+vuint64m8_t test_vmacc_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1,
+                                    vuint64m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u64m8_tum(vm, vd, rs1, vs2, vl);
 }
-vint8mf8_t test_vmacc_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+vint8mf8_t test_vmacc_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1,
+                                    vint8mf8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8mf8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8mf8_t test_vmacc_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+vint8mf8_t test_vmacc_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1,
+                                    vint8mf8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8mf8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8mf4_t test_vmacc_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+vint8mf4_t test_vmacc_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1,
+                                    vint8mf4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8mf4_t test_vmacc_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+vint8mf4_t test_vmacc_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1,
+                                    vint8mf4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8mf2_t test_vmacc_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+vint8mf2_t test_vmacc_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1,
+                                    vint8mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8mf2_t test_vmacc_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+vint8mf2_t test_vmacc_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1,
+                                    vint8mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8m1_t test_vmacc_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+vint8m1_t test_vmacc_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1,
+                                  vint8m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8m1_t test_vmacc_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+vint8m1_t test_vmacc_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1,
+                                  vint8m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8m2_t test_vmacc_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+vint8m2_t test_vmacc_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1,
+                                  vint8m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8m2_t test_vmacc_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+vint8m2_t test_vmacc_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1,
+                                  vint8m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8m4_t test_vmacc_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+vint8m4_t test_vmacc_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1,
+                                  vint8m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8m4_t test_vmacc_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) {
+vint8m4_t test_vmacc_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1,
+                                  vint8m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint8m8_t test_vmacc_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) {
+vint8m8_t test_vmacc_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1,
+                                  vint8m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i8m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint8m8_t test_vmacc_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) {
+vint8m8_t test_vmacc_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1,
+                                  vint8m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i8m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16mf4_t test_vmacc_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) {
+vint16mf4_t test_vmacc_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd,
+                                      vint16mf4_t vs1, vint16mf4_t vs2,
+                                      size_t vl) {
   return __riscv_vmacc_vv_i16mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16mf4_t test_vmacc_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) {
+vint16mf4_t test_vmacc_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1,
+                                      vint16mf4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16mf2_t test_vmacc_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) {
+vint16mf2_t test_vmacc_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd,
+                                      vint16mf2_t vs1, vint16mf2_t vs2,
+                                      size_t vl) {
   return __riscv_vmacc_vv_i16mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16mf2_t test_vmacc_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) {
+vint16mf2_t test_vmacc_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1,
+                                      vint16mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16m1_t test_vmacc_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) {
+vint16m1_t test_vmacc_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1,
+                                    vint16m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i16m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16m1_t test_vmacc_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) {
+vint16m1_t test_vmacc_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1,
+                                    vint16m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i16m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16m2_t test_vmacc_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) {
+vint16m2_t test_vmacc_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1,
+                                    vint16m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i16m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16m2_t test_vmacc_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) {
+vint16m2_t test_vmacc_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1,
+                                    vint16m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i16m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16m4_t test_vmacc_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) {
+vint16m4_t test_vmacc_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1,
+                                    vint16m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i16m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16m4_t test_vmacc_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) {
+vint16m4_t test_vmacc_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1,
+                                    vint16m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i16m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint16m8_t test_vmacc_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) {
+vint16m8_t test_vmacc_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1,
+                                    vint16m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i16m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint16m8_t test_vmacc_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) {
+vint16m8_t test_vmacc_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1,
+                                    vint16m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i16m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32mf2_t test_vmacc_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) {
+vint32mf2_t test_vmacc_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                      vint32mf2_t vs1, vint32mf2_t vs2,
+                                      size_t vl) {
   return __riscv_vmacc_vv_i32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32mf2_t test_vmacc_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) {
+vint32mf2_t test_vmacc_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1,
+                                      vint32mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32m1_t test_vmacc_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+vint32m1_t test_vmacc_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1,
+                                    vint32m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i32m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32m1_t test_vmacc_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+vint32m1_t test_vmacc_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1,
+                                    vint32m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i32m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32m2_t test_vmacc_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+vint32m2_t test_vmacc_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1,
+                                    vint32m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i32m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32m2_t test_vmacc_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+vint32m2_t test_vmacc_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1,
+                                    vint32m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i32m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32m4_t test_vmacc_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+vint32m4_t test_vmacc_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1,
+                                    vint32m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i32m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32m4_t test_vmacc_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
+vint32m4_t test_vmacc_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1,
+                                    vint32m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i32m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint32m8_t test_vmacc_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) {
+vint32m8_t test_vmacc_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1,
+                                    vint32m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i32m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint32m8_t test_vmacc_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) {
+vint32m8_t test_vmacc_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1,
+                                    vint32m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i32m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vint64m1_t test_vmacc_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) {
+vint64m1_t test_vmacc_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1,
+                                    vint64m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i64m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vint64m1_t test_vmacc_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) {
+vint64m1_t test_vmacc_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1,
+                                    vint64m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i64m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vint64m2_t test_vmacc_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) {
+vint64m2_t test_vmacc_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1,
+                                    vint64m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i64m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vint64m2_t test_vmacc_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) {
+vint64m2_t test_vmacc_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1,
+                                    vint64m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i64m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vint64m4_t test_vmacc_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) {
+vint64m4_t test_vmacc_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1,
+                                    vint64m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i64m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vint64m4_t test_vmacc_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) {
+vint64m4_t test_vmacc_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1,
+                                    vint64m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i64m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vint64m8_t test_vmacc_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) {
+vint64m8_t test_vmacc_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1,
+                                    vint64m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_i64m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vint64m8_t test_vmacc_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) {
+vint64m8_t test_vmacc_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1,
+                                    vint64m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_i64m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8mf8_t test_vmacc_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+vuint8mf8_t test_vmacc_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf8_t vs1, vuint8mf8_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_u8mf8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8mf8_t test_vmacc_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
+vuint8mf8_t test_vmacc_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1,
+                                     vuint8mf8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8mf8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8mf4_t test_vmacc_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+vuint8mf4_t test_vmacc_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf4_t vs1, vuint8mf4_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_u8mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8mf4_t test_vmacc_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+vuint8mf4_t test_vmacc_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1,
+                                     vuint8mf4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8mf2_t test_vmacc_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+vuint8mf2_t test_vmacc_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8mf2_t vs1, vuint8mf2_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_u8mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8mf2_t test_vmacc_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+vuint8mf2_t test_vmacc_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1,
+                                     vuint8mf2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8m1_t test_vmacc_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+vuint8m1_t test_vmacc_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1,
+                                   vuint8m1_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u8m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8m1_t test_vmacc_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+vuint8m1_t test_vmacc_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1,
+                                   vuint8m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8m2_t test_vmacc_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+vuint8m2_t test_vmacc_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1,
+                                   vuint8m2_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u8m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8m2_t test_vmacc_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) {
+vuint8m2_t test_vmacc_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1,
+                                   vuint8m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8m4_t test_vmacc_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) {
+vuint8m4_t test_vmacc_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1,
+                                   vuint8m4_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u8m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8m4_t test_vmacc_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) {
+vuint8m4_t test_vmacc_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1,
+                                   vuint8m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint8m8_t test_vmacc_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) {
+vuint8m8_t test_vmacc_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1,
+                                   vuint8m8_t vs2, size_t vl) {
   return __riscv_vmacc_vv_u8m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint8m8_t test_vmacc_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) {
+vuint8m8_t test_vmacc_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1,
+                                   vuint8m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u8m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16mf4_t test_vmacc_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) {
+vuint16mf4_t test_vmacc_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf4_t vs1, vuint16mf4_t vs2,
+                                       size_t vl) {
   return __riscv_vmacc_vv_u16mf4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16mf4_t test_vmacc_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) {
+vuint16mf4_t test_vmacc_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                       uint16_t rs1, vuint16mf4_t vs2,
+                                       size_t vl) {
   return __riscv_vmacc_vx_u16mf4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16mf2_t test_vmacc_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) {
+vuint16mf2_t test_vmacc_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16mf2_t vs1, vuint16mf2_t vs2,
+                                       size_t vl) {
   return __riscv_vmacc_vv_u16mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16mf2_t test_vmacc_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) {
+vuint16mf2_t test_vmacc_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                       uint16_t rs1, vuint16mf2_t vs2,
+                                       size_t vl) {
   return __riscv_vmacc_vx_u16mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16m1_t test_vmacc_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) {
+vuint16m1_t test_vmacc_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m1_t vs1, vuint16m1_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_u16m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16m1_t test_vmacc_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) {
+vuint16m1_t test_vmacc_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1,
+                                     vuint16m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16m2_t test_vmacc_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) {
+vuint16m2_t test_vmacc_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m2_t vs1, vuint16m2_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_u16m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16m2_t test_vmacc_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) {
+vuint16m2_t test_vmacc_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1,
+                                     vuint16m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16m4_t test_vmacc_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) {
+vuint16m4_t test_vmacc_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m4_t vs1, vuint16m4_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_u16m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16m4_t test_vmacc_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) {
+vuint16m4_t test_vmacc_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1,
+                                     vuint16m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint16m8_t test_vmacc_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) {
+vuint16m8_t test_vmacc_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                     vuint16m8_t vs1, vuint16m8_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_u16m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint16m8_t test_vmacc_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) {
+vuint16m8_t test_vmacc_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1,
+                                     vuint16m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u16m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32mf2_t test_vmacc_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) {
+vuint32mf2_t test_vmacc_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint32mf2_t vs1, vuint32mf2_t vs2,
+                                       size_t vl) {
   return __riscv_vmacc_vv_u32mf2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32mf2_t test_vmacc_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) {
+vuint32mf2_t test_vmacc_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                       uint32_t rs1, vuint32mf2_t vs2,
+                                       size_t vl) {
   return __riscv_vmacc_vx_u32mf2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32m1_t test_vmacc_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) {
+vuint32m1_t test_vmacc_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m1_t vs1, vuint32m1_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_u32m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32m1_t test_vmacc_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) {
+vuint32m1_t test_vmacc_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1,
+                                     vuint32m1_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32m1_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32m2_t test_vmacc_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) {
+vuint32m2_t test_vmacc_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                     vuint32m2_t vs1, vuint32m2_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_u32m2_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32m2_t test_vmacc_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) {
+vuint32m2_t test_vmacc_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1,
+                                     vuint32m2_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32m2_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32m4_t test_vmacc_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+vuint32m4_t test_vmacc_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                     vuint32m4_t vs1, vuint32m4_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_u32m4_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32m4_t test_vmacc_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) {
+vuint32m4_t test_vmacc_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1,
+                                     vuint32m4_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32m4_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint32m8_t test_vmacc_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) {
+vuint32m8_t test_vmacc_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                     vuint32m8_t vs1, vuint32m8_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_u32m8_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint32m8_t test_vmacc_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) {
+vuint32m8_t test_vmacc_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1,
+                                     vuint32m8_t vs2, size_t vl) {
   return __riscv_vmacc_vx_u32m8_tumu(vm, vd, rs1, vs2, vl);
 }
-vuint64m1_t test_vmacc_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) {
+vuint64m1_t test_vmacc_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                     vuint64m1_t vs1, vuint64m1_t vs2,
+                                     size_t vl) {
   return __riscv_vmacc_vv_u64m1_tumu(vm, vd, vs1, vs2, vl);
 }
-vuint64m1_t test_vmacc_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vmacc_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vmacc_vx_u64m1_tumu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmacc_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vmacc_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vmacc_vv_u64m2_tumu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmacc_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vmacc_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vmacc_vx_u64m2_tumu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmacc_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vmacc_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs1, vuint64m4_t vs2, + size_t vl) { return __riscv_vmacc_vv_u64m4_tumu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmacc_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vmacc_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vmacc_vx_u64m4_tumu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmacc_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vmacc_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs1, vuint64m8_t vs2, + size_t vl) { return __riscv_vmacc_vv_u64m8_tumu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmacc_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vmacc_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vmacc_vx_u64m8_tumu(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vmacc_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vmacc_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vmacc_vv_i8mf8_mu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmacc_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vmacc_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vmacc_vx_i8mf8_mu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmacc_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vmacc_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vmacc_vv_i8mf4_mu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmacc_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vmacc_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vmacc_vx_i8mf4_mu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmacc_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vmacc_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vmacc_vv_i8mf2_mu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmacc_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vmacc_vx_i8mf2_mu(vbool16_t vm, 
vint8mf2_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vmacc_vx_i8mf2_mu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmacc_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vmacc_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vmacc_vv_i8m1_mu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmacc_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vmacc_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vmacc_vx_i8m1_mu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmacc_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vmacc_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vmacc_vv_i8m2_mu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmacc_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vmacc_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vmacc_vx_i8m2_mu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmacc_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vmacc_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vmacc_vv_i8m4_mu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmacc_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vmacc_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vmacc_vx_i8m4_mu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmacc_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vmacc_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, + vint8m8_t vs2, size_t vl) { return __riscv_vmacc_vv_i8m8_mu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmacc_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vmacc_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, + vint8m8_t vs2, size_t vl) { return __riscv_vmacc_vx_i8m8_mu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmacc_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vmacc_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vmacc_vv_i16mf4_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmacc_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vmacc_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vmacc_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmacc_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vmacc_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vmacc_vv_i16mf2_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmacc_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vmacc_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vmacc_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmacc_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vmacc_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, + 
vint16m1_t vs2, size_t vl) { return __riscv_vmacc_vv_i16m1_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmacc_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vmacc_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vmacc_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmacc_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vmacc_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vmacc_vv_i16m2_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmacc_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vmacc_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vmacc_vx_i16m2_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmacc_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vmacc_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vmacc_vv_i16m4_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmacc_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vmacc_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vmacc_vx_i16m4_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmacc_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vmacc_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, + vint16m8_t vs2, size_t vl) { return __riscv_vmacc_vv_i16m8_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmacc_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vmacc_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, + vint16m8_t vs2, size_t vl) { return __riscv_vmacc_vx_i16m8_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmacc_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vmacc_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vmacc_vv_i32mf2_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmacc_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vmacc_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vmacc_vx_i32mf2_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmacc_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vmacc_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, + vint32m1_t vs2, size_t vl) { return __riscv_vmacc_vv_i32m1_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmacc_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vmacc_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vmacc_vx_i32m1_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmacc_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vmacc_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, + vint32m2_t vs2, size_t vl) { return __riscv_vmacc_vv_i32m2_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmacc_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vmacc_vx_i32m2_mu(vbool16_t vm, 
vint32m2_t vd, int32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vmacc_vx_i32m2_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmacc_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vmacc_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vmacc_vv_i32m4_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmacc_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vmacc_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vmacc_vx_i32m4_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmacc_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vmacc_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, + vint32m8_t vs2, size_t vl) { return __riscv_vmacc_vv_i32m8_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmacc_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vmacc_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, + vint32m8_t vs2, size_t vl) { return __riscv_vmacc_vx_i32m8_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmacc_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vmacc_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, + vint64m1_t vs2, size_t vl) { return __riscv_vmacc_vv_i64m1_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmacc_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vmacc_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, + vint64m1_t vs2, size_t vl) { return __riscv_vmacc_vx_i64m1_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmacc_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vmacc_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, + vint64m2_t vs2, size_t vl) { return __riscv_vmacc_vv_i64m2_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmacc_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vmacc_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, + vint64m2_t vs2, size_t vl) { return __riscv_vmacc_vx_i64m2_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmacc_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vmacc_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, + vint64m4_t vs2, size_t vl) { return __riscv_vmacc_vv_i64m4_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmacc_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vmacc_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, + vint64m4_t vs2, size_t vl) { return __riscv_vmacc_vx_i64m4_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmacc_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vmacc_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, + vint64m8_t vs2, size_t vl) { return __riscv_vmacc_vv_i64m8_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmacc_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vmacc_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, + vint64m8_t vs2, size_t vl) { return __riscv_vmacc_vx_i64m8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmacc_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t 
test_vmacc_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vmacc_vv_u8mf8_mu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmacc_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vmacc_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vmacc_vx_u8mf8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmacc_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vmacc_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vmacc_vv_u8mf4_mu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmacc_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vmacc_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vmacc_vx_u8mf4_mu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmacc_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vmacc_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vmacc_vv_u8mf2_mu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmacc_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vmacc_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vmacc_vx_u8mf2_mu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmacc_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vmacc_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vmacc_vv_u8m1_mu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmacc_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vmacc_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vmacc_vx_u8m1_mu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmacc_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vmacc_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vmacc_vv_u8m2_mu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmacc_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vmacc_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vmacc_vx_u8m2_mu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmacc_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vmacc_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vmacc_vv_u8m4_mu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmacc_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vmacc_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vmacc_vx_u8m4_mu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmacc_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vmacc_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vmacc_vv_u8m8_mu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmacc_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t 
vs2, size_t vl) { +vuint8m8_t test_vmacc_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vmacc_vx_u8m8_mu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmacc_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vmacc_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vmacc_vv_u16mf4_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmacc_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vmacc_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vmacc_vx_u16mf4_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmacc_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vmacc_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vmacc_vv_u16mf2_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmacc_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vmacc_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + uint16_t rs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vmacc_vx_u16mf2_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmacc_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vmacc_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vmacc_vv_u16m1_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmacc_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vmacc_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vmacc_vx_u16m1_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmacc_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vmacc_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vmacc_vv_u16m2_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmacc_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vmacc_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vmacc_vx_u16m2_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmacc_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vmacc_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vmacc_vv_u16m4_mu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmacc_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vmacc_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vmacc_vx_u16m4_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmacc_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vmacc_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vmacc_vv_u16m8_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmacc_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vmacc_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) 
{ return __riscv_vmacc_vx_u16m8_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmacc_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vmacc_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vmacc_vv_u32mf2_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmacc_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vmacc_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + uint32_t rs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vmacc_vx_u32mf2_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmacc_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vmacc_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vmacc_vv_u32m1_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmacc_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vmacc_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vmacc_vx_u32m1_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmacc_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vmacc_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vmacc_vv_u32m2_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmacc_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vmacc_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vmacc_vx_u32m2_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmacc_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vmacc_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vmacc_vv_u32m4_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmacc_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vmacc_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vmacc_vx_u32m4_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmacc_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vmacc_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vmacc_vv_u32m8_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmacc_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vmacc_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vmacc_vx_u32m8_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmacc_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vmacc_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs1, vuint64m1_t vs2, + size_t vl) { return __riscv_vmacc_vv_u64m1_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmacc_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vmacc_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vmacc_vx_u64m1_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmacc_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, 
vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vmacc_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vmacc_vv_u64m2_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmacc_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vmacc_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vmacc_vx_u64m2_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmacc_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vmacc_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs1, vuint64m4_t vs2, + size_t vl) { return __riscv_vmacc_vv_u64m4_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmacc_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vmacc_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vmacc_vx_u64m4_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmacc_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vmacc_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vmacc_vv_u64m8_mu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmacc_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vmacc_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vmacc_vx_u64m8_mu(vm, vd, rs1, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmadd.c b/auto-generated/policy_funcs/llvm-api-tests/vmadd.c index f63e03c22..097444941 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmadd.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmadd.c @@ -1,1415 +1,1833 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8_t test_vmadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vmadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vmadd_vv_i8mf8_tu(vd, vs1, vs2, vl); } -vint8mf8_t test_vmadd_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vmadd_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vmadd_vx_i8mf8_tu(vd, rs1, vs2, vl); } -vint8mf4_t test_vmadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vmadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, + size_t vl) { return __riscv_vmadd_vv_i8mf4_tu(vd, vs1, vs2, vl); } -vint8mf4_t test_vmadd_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vmadd_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, + size_t vl) { return __riscv_vmadd_vx_i8mf4_tu(vd, rs1, vs2, vl); } -vint8mf2_t test_vmadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vmadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_i8mf2_tu(vd, vs1, vs2, vl); } -vint8mf2_t test_vmadd_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t
vs2, size_t vl) { +vint8mf2_t test_vmadd_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, + size_t vl) { return __riscv_vmadd_vx_i8mf2_tu(vd, rs1, vs2, vl); } -vint8m1_t test_vmadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vmadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_i8m1_tu(vd, vs1, vs2, vl); } -vint8m1_t test_vmadd_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vmadd_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, + size_t vl) { return __riscv_vmadd_vx_i8m1_tu(vd, rs1, vs2, vl); } -vint8m2_t test_vmadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vmadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_i8m2_tu(vd, vs1, vs2, vl); } -vint8m2_t test_vmadd_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vmadd_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, + size_t vl) { return __riscv_vmadd_vx_i8m2_tu(vd, rs1, vs2, vl); } -vint8m4_t test_vmadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vmadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, + size_t vl) { return __riscv_vmadd_vv_i8m4_tu(vd, vs1, vs2, vl); } -vint8m4_t test_vmadd_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vmadd_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, + size_t vl) { return __riscv_vmadd_vx_i8m4_tu(vd, rs1, vs2, vl); } -vint8m8_t test_vmadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vmadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, + size_t vl) { return __riscv_vmadd_vv_i8m8_tu(vd, vs1, vs2, vl); } -vint8m8_t test_vmadd_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vmadd_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, + size_t vl) { return __riscv_vmadd_vx_i8m8_tu(vd, rs1, vs2, vl); } -vint16mf4_t test_vmadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vmadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vmadd_vv_i16mf4_tu(vd, vs1, vs2, vl); } -vint16mf4_t test_vmadd_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vmadd_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vmadd_vx_i16mf4_tu(vd, rs1, vs2, vl); } -vint16mf2_t test_vmadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vmadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vmadd_vv_i16mf2_tu(vd, vs1, vs2, vl); } -vint16mf2_t test_vmadd_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vmadd_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_i16mf2_tu(vd, rs1, vs2, vl); } -vint16m1_t test_vmadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vmadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_i16m1_tu(vd, vs1, vs2, vl); } -vint16m1_t test_vmadd_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vmadd_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, + size_t vl) { return __riscv_vmadd_vx_i16m1_tu(vd, rs1, vs2, vl); } -vint16m2_t 
test_vmadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vmadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_i16m2_tu(vd, vs1, vs2, vl); } -vint16m2_t test_vmadd_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vmadd_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, + size_t vl) { return __riscv_vmadd_vx_i16m2_tu(vd, rs1, vs2, vl); } -vint16m4_t test_vmadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vmadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, + size_t vl) { return __riscv_vmadd_vv_i16m4_tu(vd, vs1, vs2, vl); } -vint16m4_t test_vmadd_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vmadd_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, + size_t vl) { return __riscv_vmadd_vx_i16m4_tu(vd, rs1, vs2, vl); } -vint16m8_t test_vmadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vmadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, + size_t vl) { return __riscv_vmadd_vv_i16m8_tu(vd, vs1, vs2, vl); } -vint16m8_t test_vmadd_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vmadd_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, + size_t vl) { return __riscv_vmadd_vx_i16m8_tu(vd, rs1, vs2, vl); } -vint32mf2_t test_vmadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vmadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vmadd_vv_i32mf2_tu(vd, vs1, vs2, vl); } -vint32mf2_t test_vmadd_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vmadd_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_i32mf2_tu(vd, rs1, vs2, vl); } -vint32m1_t test_vmadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vmadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_i32m1_tu(vd, vs1, vs2, vl); } -vint32m1_t test_vmadd_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vmadd_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, + size_t vl) { return __riscv_vmadd_vx_i32m1_tu(vd, rs1, vs2, vl); } -vint32m2_t test_vmadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vmadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_i32m2_tu(vd, vs1, vs2, vl); } -vint32m2_t test_vmadd_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vmadd_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, + size_t vl) { return __riscv_vmadd_vx_i32m2_tu(vd, rs1, vs2, vl); } -vint32m4_t test_vmadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vmadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, + size_t vl) { return __riscv_vmadd_vv_i32m4_tu(vd, vs1, vs2, vl); } -vint32m4_t test_vmadd_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vmadd_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, + size_t vl) { return __riscv_vmadd_vx_i32m4_tu(vd, rs1, vs2, vl); } -vint32m8_t test_vmadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vmadd_vv_i32m8_tu(vint32m8_t vd, 
vint32m8_t vs1, vint32m8_t vs2, + size_t vl) { return __riscv_vmadd_vv_i32m8_tu(vd, vs1, vs2, vl); } -vint32m8_t test_vmadd_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vmadd_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, + size_t vl) { return __riscv_vmadd_vx_i32m8_tu(vd, rs1, vs2, vl); } -vint64m1_t test_vmadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vmadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_i64m1_tu(vd, vs1, vs2, vl); } -vint64m1_t test_vmadd_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vmadd_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, + size_t vl) { return __riscv_vmadd_vx_i64m1_tu(vd, rs1, vs2, vl); } -vint64m2_t test_vmadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vmadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_i64m2_tu(vd, vs1, vs2, vl); } -vint64m2_t test_vmadd_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vmadd_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, + size_t vl) { return __riscv_vmadd_vx_i64m2_tu(vd, rs1, vs2, vl); } -vint64m4_t test_vmadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vmadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, + size_t vl) { return __riscv_vmadd_vv_i64m4_tu(vd, vs1, vs2, vl); } -vint64m4_t test_vmadd_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vmadd_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, + size_t vl) { return __riscv_vmadd_vx_i64m4_tu(vd, rs1, vs2, vl); } -vint64m8_t test_vmadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vmadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, + size_t vl) { return __riscv_vmadd_vv_i64m8_tu(vd, vs1, vs2, vl); } -vint64m8_t test_vmadd_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vmadd_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, + size_t vl) { return __riscv_vmadd_vx_i64m8_tu(vd, rs1, vs2, vl); } -vuint8mf8_t test_vmadd_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vmadd_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vmadd_vv_u8mf8_tu(vd, vs1, vs2, vl); } -vuint8mf8_t test_vmadd_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vmadd_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vmadd_vx_u8mf8_tu(vd, rs1, vs2, vl); } -vuint8mf4_t test_vmadd_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vmadd_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vmadd_vv_u8mf4_tu(vd, vs1, vs2, vl); } -vuint8mf4_t test_vmadd_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vmadd_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vmadd_vx_u8mf4_tu(vd, rs1, vs2, vl); } -vuint8mf2_t test_vmadd_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vmadd_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vmadd_vv_u8mf2_tu(vd, vs1, vs2, vl); } -vuint8mf2_t 
test_vmadd_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vmadd_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vmadd_vx_u8mf2_tu(vd, rs1, vs2, vl); } -vuint8m1_t test_vmadd_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vmadd_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8m1_tu(vd, vs1, vs2, vl); } -vuint8m1_t test_vmadd_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vmadd_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, + size_t vl) { return __riscv_vmadd_vx_u8m1_tu(vd, rs1, vs2, vl); } -vuint8m2_t test_vmadd_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vmadd_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8m2_tu(vd, vs1, vs2, vl); } -vuint8m2_t test_vmadd_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vmadd_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, + size_t vl) { return __riscv_vmadd_vx_u8m2_tu(vd, rs1, vs2, vl); } -vuint8m4_t test_vmadd_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vmadd_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8m4_tu(vd, vs1, vs2, vl); } -vuint8m4_t test_vmadd_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vmadd_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, + size_t vl) { return __riscv_vmadd_vx_u8m4_tu(vd, rs1, vs2, vl); } -vuint8m8_t test_vmadd_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vmadd_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8m8_tu(vd, vs1, vs2, vl); } -vuint8m8_t test_vmadd_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vmadd_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, + size_t vl) { return __riscv_vmadd_vx_u8m8_tu(vd, rs1, vs2, vl); } -vuint16mf4_t test_vmadd_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vmadd_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, + vuint16mf4_t vs2, size_t vl) { return __riscv_vmadd_vv_u16mf4_tu(vd, vs1, vs2, vl); } -vuint16mf4_t test_vmadd_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vmadd_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, + vuint16mf4_t vs2, size_t vl) { return __riscv_vmadd_vx_u16mf4_tu(vd, rs1, vs2, vl); } -vuint16mf2_t test_vmadd_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vmadd_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vmadd_vv_u16mf2_tu(vd, vs1, vs2, vl); } -vuint16mf2_t test_vmadd_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vmadd_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_u16mf2_tu(vd, rs1, vs2, vl); } -vuint16m1_t test_vmadd_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vmadd_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vmadd_vv_u16m1_tu(vd, vs1, vs2, vl); } -vuint16m1_t test_vmadd_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { 
+vuint16m1_t test_vmadd_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m1_tu(vd, rs1, vs2, vl); } -vuint16m2_t test_vmadd_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vmadd_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vmadd_vv_u16m2_tu(vd, vs1, vs2, vl); } -vuint16m2_t test_vmadd_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vmadd_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m2_tu(vd, rs1, vs2, vl); } -vuint16m4_t test_vmadd_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vmadd_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vmadd_vv_u16m4_tu(vd, vs1, vs2, vl); } -vuint16m4_t test_vmadd_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vmadd_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m4_tu(vd, rs1, vs2, vl); } -vuint16m8_t test_vmadd_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vmadd_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vmadd_vv_u16m8_tu(vd, vs1, vs2, vl); } -vuint16m8_t test_vmadd_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vmadd_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m8_tu(vd, rs1, vs2, vl); } -vuint32mf2_t test_vmadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vmadd_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vmadd_vv_u32mf2_tu(vd, vs1, vs2, vl); } -vuint32mf2_t test_vmadd_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vmadd_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_u32mf2_tu(vd, rs1, vs2, vl); } -vuint32m1_t test_vmadd_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vmadd_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vmadd_vv_u32m1_tu(vd, vs1, vs2, vl); } -vuint32m1_t test_vmadd_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vmadd_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m1_tu(vd, rs1, vs2, vl); } -vuint32m2_t test_vmadd_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vmadd_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vmadd_vv_u32m2_tu(vd, vs1, vs2, vl); } -vuint32m2_t test_vmadd_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vmadd_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m2_tu(vd, rs1, vs2, vl); } -vuint32m4_t test_vmadd_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vmadd_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vmadd_vv_u32m4_tu(vd, vs1, vs2, vl); } -vuint32m4_t test_vmadd_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t 
test_vmadd_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m4_tu(vd, rs1, vs2, vl); } -vuint32m8_t test_vmadd_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vmadd_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vmadd_vv_u32m8_tu(vd, vs1, vs2, vl); } -vuint32m8_t test_vmadd_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vmadd_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m8_tu(vd, rs1, vs2, vl); } -vuint64m1_t test_vmadd_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vmadd_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vmadd_vv_u64m1_tu(vd, vs1, vs2, vl); } -vuint64m1_t test_vmadd_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vmadd_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m1_tu(vd, rs1, vs2, vl); } -vuint64m2_t test_vmadd_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vmadd_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vmadd_vv_u64m2_tu(vd, vs1, vs2, vl); } -vuint64m2_t test_vmadd_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vmadd_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m2_tu(vd, rs1, vs2, vl); } -vuint64m4_t test_vmadd_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vmadd_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vmadd_vv_u64m4_tu(vd, vs1, vs2, vl); } -vuint64m4_t test_vmadd_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vmadd_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m4_tu(vd, rs1, vs2, vl); } -vuint64m8_t test_vmadd_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vmadd_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vmadd_vv_u64m8_tu(vd, vs1, vs2, vl); } -vuint64m8_t test_vmadd_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vmadd_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m8_tu(vd, rs1, vs2, vl); } -vint8mf8_t test_vmadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vmadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vmadd_vv_i8mf8_tum(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vmadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vmadd_vx_i8mf8_tum(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vmadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vmadd_vv_i8mf4_tum(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, 
int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vmadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vmadd_vx_i8mf4_tum(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vmadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vmadd_vv_i8mf2_tum(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vmadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_i8mf2_tum(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vmadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vmadd_vv_i8m1_tum(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vmadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vmadd_vx_i8m1_tum(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vmadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vmadd_vv_i8m2_tum(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vmadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vmadd_vx_i8m2_tum(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vmadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vmadd_vv_i8m4_tum(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vmadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vmadd_vx_i8m4_tum(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vmadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, + vint8m8_t vs2, size_t vl) { return __riscv_vmadd_vv_i8m8_tum(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vmadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, + vint8m8_t vs2, size_t vl) { return __riscv_vmadd_vx_i8m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vmadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vmadd_vv_i16mf4_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vmadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vmadd_vx_i16mf4_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, 
vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vmadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_i16mf2_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vmadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_i16mf2_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vmadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, + vint16m1_t vs2, size_t vl) { return __riscv_vmadd_vv_i16m1_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vmadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vmadd_vx_i16m1_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vmadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vmadd_vv_i16m2_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vmadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vmadd_vx_i16m2_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vmadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vmadd_vv_i16m4_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vmadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vmadd_vx_i16m4_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vmadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, + vint16m8_t vs2, size_t vl) { return __riscv_vmadd_vv_i16m8_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vmadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, + vint16m8_t vs2, size_t vl) { return __riscv_vmadd_vx_i16m8_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vmadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_i32mf2_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vmadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_i32mf2_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vmadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, + vint32m1_t vs2, size_t vl) { return __riscv_vmadd_vv_i32m1_tum(vm, vd, vs1, 
vs2, vl); } -vint32m1_t test_vmadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vmadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vmadd_vx_i32m1_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vmadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, + vint32m2_t vs2, size_t vl) { return __riscv_vmadd_vv_i32m2_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vmadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vmadd_vx_i32m2_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vmadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vmadd_vv_i32m4_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vmadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vmadd_vx_i32m4_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vmadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, + vint32m8_t vs2, size_t vl) { return __riscv_vmadd_vv_i32m8_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vmadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, + vint32m8_t vs2, size_t vl) { return __riscv_vmadd_vx_i32m8_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vmadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, + vint64m1_t vs2, size_t vl) { return __riscv_vmadd_vv_i64m1_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vmadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, + vint64m1_t vs2, size_t vl) { return __riscv_vmadd_vx_i64m1_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vmadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, + vint64m2_t vs2, size_t vl) { return __riscv_vmadd_vv_i64m2_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vmadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, + vint64m2_t vs2, size_t vl) { return __riscv_vmadd_vx_i64m2_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vmadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, + vint64m4_t vs2, size_t vl) { return __riscv_vmadd_vv_i64m4_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vmadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, + vint64m4_t vs2, size_t vl) 
{ return __riscv_vmadd_vx_i64m4_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vmadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, + vint64m8_t vs2, size_t vl) { return __riscv_vmadd_vv_i64m8_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vmadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, + vint64m8_t vs2, size_t vl) { return __riscv_vmadd_vx_i64m8_tum(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmadd_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vmadd_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8mf8_tum(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmadd_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vmadd_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vmadd_vx_u8mf8_tum(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmadd_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vmadd_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8mf4_tum(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmadd_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vmadd_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vmadd_vx_u8mf4_tum(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmadd_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vmadd_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8mf2_tum(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmadd_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vmadd_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_u8mf2_tum(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmadd_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vmadd_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vmadd_vv_u8m1_tum(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmadd_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vmadd_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u8m1_tum(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmadd_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vmadd_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vmadd_vv_u8m2_tum(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmadd_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vmadd_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u8m2_tum(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmadd_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t 
test_vmadd_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vmadd_vv_u8m4_tum(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmadd_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vmadd_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u8m4_tum(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmadd_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vmadd_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vmadd_vv_u8m8_tum(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmadd_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vmadd_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u8m8_tum(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmadd_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vmadd_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16mf4_tum(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmadd_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vmadd_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vmadd_vx_u16mf4_tum(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmadd_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vmadd_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16mf2_tum(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmadd_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vmadd_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + uint16_t rs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vmadd_vx_u16mf2_tum(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmadd_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vmadd_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16m1_tum(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmadd_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vmadd_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m1_tum(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmadd_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vmadd_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16m2_tum(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmadd_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vmadd_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m2_tum(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmadd_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vmadd_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs1, vuint16m4_t vs2, + size_t vl) { return 
__riscv_vmadd_vv_u16m4_tum(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmadd_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vmadd_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m4_tum(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmadd_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vmadd_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs1, vuint16m8_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16m8_tum(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmadd_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vmadd_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m8_tum(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmadd_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vmadd_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u32mf2_tum(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmadd_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vmadd_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + uint32_t rs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vmadd_vx_u32mf2_tum(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmadd_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vmadd_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_u32m1_tum(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmadd_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vmadd_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m1_tum(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmadd_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vmadd_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u32m2_tum(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmadd_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vmadd_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m2_tum(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmadd_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vmadd_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u32m4_tum(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmadd_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vmadd_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m4_tum(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmadd_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vmadd_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs1, vuint32m8_t vs2, + size_t vl) { return __riscv_vmadd_vv_u32m8_tum(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmadd_vx_u32m8_tum(vbool4_t 
vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vmadd_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m8_tum(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmadd_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vmadd_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs1, vuint64m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_u64m1_tum(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmadd_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vmadd_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m1_tum(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmadd_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vmadd_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u64m2_tum(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmadd_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vmadd_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m2_tum(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmadd_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vmadd_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs1, vuint64m4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u64m4_tum(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmadd_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vmadd_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m4_tum(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmadd_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vmadd_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs1, vuint64m8_t vs2, + size_t vl) { return __riscv_vmadd_vv_u64m8_tum(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmadd_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vmadd_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m8_tum(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vmadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vmadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vmadd_vv_i8mf8_tumu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vmadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vmadd_vx_i8mf8_tumu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vmadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vmadd_vv_i8mf4_tumu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vmadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t 
rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vmadd_vx_i8mf4_tumu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vmadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vmadd_vv_i8mf2_tumu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vmadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_i8mf2_tumu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vmadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vmadd_vv_i8m1_tumu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vmadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vmadd_vx_i8m1_tumu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vmadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vmadd_vv_i8m2_tumu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vmadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vmadd_vx_i8m2_tumu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vmadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vmadd_vv_i8m4_tumu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vmadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vmadd_vx_i8m4_tumu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vmadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, + vint8m8_t vs2, size_t vl) { return __riscv_vmadd_vv_i8m8_tumu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vmadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, + vint8m8_t vs2, size_t vl) { return __riscv_vmadd_vx_i8m8_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vmadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vmadd_vv_i16mf4_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vmadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vmadd_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vmadd_vv_i16mf2_tumu(vbool32_t vm, 
vint16mf2_t vd, + vint16mf2_t vs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_i16mf2_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vmadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vmadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, + vint16m1_t vs2, size_t vl) { return __riscv_vmadd_vv_i16m1_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vmadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vmadd_vx_i16m1_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vmadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vmadd_vv_i16m2_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vmadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vmadd_vx_i16m2_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vmadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vmadd_vv_i16m4_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vmadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vmadd_vx_i16m4_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vmadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, + vint16m8_t vs2, size_t vl) { return __riscv_vmadd_vv_i16m8_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vmadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, + vint16m8_t vs2, size_t vl) { return __riscv_vmadd_vx_i16m8_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vmadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_i32mf2_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vmadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vmadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, + vint32m1_t vs2, size_t vl) { return __riscv_vmadd_vv_i32m1_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t 
test_vmadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vmadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vmadd_vx_i32m1_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vmadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, + vint32m2_t vs2, size_t vl) { return __riscv_vmadd_vv_i32m2_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vmadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vmadd_vx_i32m2_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vmadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vmadd_vv_i32m4_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vmadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vmadd_vx_i32m4_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vmadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, + vint32m8_t vs2, size_t vl) { return __riscv_vmadd_vv_i32m8_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vmadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, + vint32m8_t vs2, size_t vl) { return __riscv_vmadd_vx_i32m8_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vmadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, + vint64m1_t vs2, size_t vl) { return __riscv_vmadd_vv_i64m1_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vmadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, + vint64m1_t vs2, size_t vl) { return __riscv_vmadd_vx_i64m1_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vmadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, + vint64m2_t vs2, size_t vl) { return __riscv_vmadd_vv_i64m2_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vmadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, + vint64m2_t vs2, size_t vl) { return __riscv_vmadd_vx_i64m2_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vmadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, + vint64m4_t vs2, size_t vl) { return __riscv_vmadd_vv_i64m4_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vmadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, + vint64m4_t 
vs2, size_t vl) { return __riscv_vmadd_vx_i64m4_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vmadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, + vint64m8_t vs2, size_t vl) { return __riscv_vmadd_vv_i64m8_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vmadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, + vint64m8_t vs2, size_t vl) { return __riscv_vmadd_vx_i64m8_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmadd_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vmadd_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8mf8_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmadd_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vmadd_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vmadd_vx_u8mf8_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmadd_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vmadd_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8mf4_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmadd_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vmadd_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vmadd_vx_u8mf4_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmadd_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vmadd_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmadd_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vmadd_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_u8mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmadd_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vmadd_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vmadd_vv_u8m1_tumu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmadd_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vmadd_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u8m1_tumu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmadd_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vmadd_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vmadd_vv_u8m2_tumu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmadd_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vmadd_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u8m2_tumu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmadd_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, 
vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vmadd_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vmadd_vv_u8m4_tumu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vmadd_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vmadd_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u8m4_tumu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmadd_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vmadd_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vmadd_vv_u8m8_tumu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmadd_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vmadd_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u8m8_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmadd_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vmadd_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16mf4_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmadd_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vmadd_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vmadd_vx_u16mf4_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmadd_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vmadd_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmadd_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vmadd_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + uint16_t rs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vmadd_vx_u16mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmadd_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vmadd_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16m1_tumu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmadd_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vmadd_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m1_tumu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmadd_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vmadd_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16m2_tumu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmadd_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vmadd_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmadd_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vmadd_vv_u16m4_tumu(vbool4_t vm, 
vuint16m4_t vd, + vuint16m4_t vs1, vuint16m4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16m4_tumu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmadd_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vmadd_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m4_tumu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmadd_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vmadd_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs1, vuint16m8_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16m8_tumu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmadd_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vmadd_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m8_tumu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmadd_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vmadd_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u32mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmadd_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vmadd_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + uint32_t rs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vmadd_vx_u32mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmadd_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vmadd_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_u32m1_tumu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmadd_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vmadd_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m1_tumu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmadd_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vmadd_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u32m2_tumu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmadd_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vmadd_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmadd_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vmadd_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u32m4_tumu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmadd_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vmadd_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m4_tumu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmadd_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vmadd_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs1, vuint32m8_t vs2, + size_t vl) 
{ return __riscv_vmadd_vv_u32m8_tumu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmadd_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vmadd_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m8_tumu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vmadd_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vmadd_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs1, vuint64m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_u64m1_tumu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmadd_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vmadd_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m1_tumu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmadd_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vmadd_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u64m2_tumu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmadd_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vmadd_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m2_tumu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmadd_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vmadd_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs1, vuint64m4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u64m4_tumu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmadd_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vmadd_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m4_tumu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmadd_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vmadd_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs1, vuint64m8_t vs2, + size_t vl) { return __riscv_vmadd_vv_u64m8_tumu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmadd_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vmadd_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m8_tumu(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vmadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vmadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vmadd_vv_i8mf8_mu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vmadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vmadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vmadd_vx_i8mf8_mu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vmadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vmadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vmadd_vv_i8mf4_mu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vmadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, 
int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vmadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vmadd_vx_i8mf4_mu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vmadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vmadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vmadd_vv_i8mf2_mu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vmadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vmadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_i8mf2_mu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vmadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vmadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vmadd_vv_i8m1_mu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vmadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vmadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vmadd_vx_i8m1_mu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vmadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vmadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vmadd_vv_i8m2_mu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vmadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vmadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vmadd_vx_i8m2_mu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vmadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vmadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vmadd_vv_i8m4_mu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vmadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vmadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vmadd_vx_i8m4_mu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vmadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vmadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, + vint8m8_t vs2, size_t vl) { return __riscv_vmadd_vv_i8m8_mu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vmadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vmadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, + vint8m8_t vs2, size_t vl) { return __riscv_vmadd_vx_i8m8_mu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vmadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vmadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vmadd_vv_i16mf4_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vmadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vmadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vmadd_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vmadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t 
test_vmadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_i16mf2_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vmadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vmadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vmadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vmadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, + vint16m1_t vs2, size_t vl) { return __riscv_vmadd_vv_i16m1_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vmadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vmadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vmadd_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vmadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vmadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vmadd_vv_i16m2_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vmadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vmadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vmadd_vx_i16m2_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vmadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vmadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vmadd_vv_i16m4_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vmadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vmadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vmadd_vx_i16m4_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vmadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vmadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, + vint16m8_t vs2, size_t vl) { return __riscv_vmadd_vv_i16m8_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vmadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vmadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, + vint16m8_t vs2, size_t vl) { return __riscv_vmadd_vx_i16m8_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vmadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vmadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_i32mf2_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vmadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vmadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_i32mf2_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vmadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vmadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, + vint32m1_t vs2, size_t vl) { return __riscv_vmadd_vv_i32m1_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vmadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t 
rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vmadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vmadd_vx_i32m1_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vmadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vmadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, + vint32m2_t vs2, size_t vl) { return __riscv_vmadd_vv_i32m2_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vmadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vmadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vmadd_vx_i32m2_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vmadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vmadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vmadd_vv_i32m4_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vmadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vmadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vmadd_vx_i32m4_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vmadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vmadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, + vint32m8_t vs2, size_t vl) { return __riscv_vmadd_vv_i32m8_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vmadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vmadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, + vint32m8_t vs2, size_t vl) { return __riscv_vmadd_vx_i32m8_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vmadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vmadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, + vint64m1_t vs2, size_t vl) { return __riscv_vmadd_vv_i64m1_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vmadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vmadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, + vint64m1_t vs2, size_t vl) { return __riscv_vmadd_vx_i64m1_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vmadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vmadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, + vint64m2_t vs2, size_t vl) { return __riscv_vmadd_vv_i64m2_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vmadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vmadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, + vint64m2_t vs2, size_t vl) { return __riscv_vmadd_vx_i64m2_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vmadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vmadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, + vint64m4_t vs2, size_t vl) { return __riscv_vmadd_vv_i64m4_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vmadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vmadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, + vint64m4_t vs2, size_t vl) { return __riscv_vmadd_vx_i64m4_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vmadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, 
vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vmadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, + vint64m8_t vs2, size_t vl) { return __riscv_vmadd_vv_i64m8_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vmadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vmadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, + vint64m8_t vs2, size_t vl) { return __riscv_vmadd_vx_i64m8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vmadd_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vmadd_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8mf8_mu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vmadd_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vmadd_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vmadd_vx_u8mf8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vmadd_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vmadd_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8mf4_mu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vmadd_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vmadd_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vmadd_vx_u8mf4_mu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vmadd_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vmadd_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u8mf2_mu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vmadd_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vmadd_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vmadd_vx_u8mf2_mu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vmadd_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vmadd_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vmadd_vv_u8m1_mu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vmadd_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vmadd_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u8m1_mu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vmadd_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vmadd_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vmadd_vv_u8m2_mu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vmadd_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vmadd_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u8m2_mu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vmadd_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vmadd_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vmadd_vv_u8m4_mu(vm, vd, vs1, vs2, vl); } -vuint8m4_t 
test_vmadd_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vmadd_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u8m4_mu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vmadd_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vmadd_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vmadd_vv_u8m8_mu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vmadd_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vmadd_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u8m8_mu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vmadd_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vmadd_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16mf4_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vmadd_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vmadd_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vmadd_vx_u16mf4_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vmadd_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vmadd_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16mf2_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vmadd_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vmadd_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + uint16_t rs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vmadd_vx_u16mf2_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vmadd_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vmadd_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_u16m1_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vmadd_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vmadd_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m1_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vmadd_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vmadd_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vmadd_vv_u16m2_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vmadd_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vmadd_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m2_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vmadd_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vmadd_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vmadd_vv_u16m4_mu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vmadd_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vmadd_vx_u16m4_mu(vbool4_t vm, 
vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m4_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vmadd_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vmadd_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vmadd_vv_u16m8_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vmadd_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vmadd_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u16m8_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vmadd_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vmadd_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u32mf2_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vmadd_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vmadd_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + uint32_t rs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vmadd_vx_u32mf2_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vmadd_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vmadd_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_u32m1_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vmadd_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vmadd_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m1_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vmadd_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vmadd_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u32m2_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vmadd_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vmadd_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m2_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vmadd_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vmadd_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vmadd_vv_u32m4_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vmadd_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vmadd_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m4_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vmadd_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vmadd_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vmadd_vv_u32m8_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vmadd_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vmadd_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u32m8_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t 
test_vmadd_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vmadd_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs1, vuint64m1_t vs2, + size_t vl) { return __riscv_vmadd_vv_u64m1_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vmadd_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vmadd_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m1_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vmadd_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vmadd_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vmadd_vv_u64m2_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vmadd_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vmadd_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m2_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vmadd_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vmadd_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs1, vuint64m4_t vs2, + size_t vl) { return __riscv_vmadd_vv_u64m4_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vmadd_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vmadd_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m4_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vmadd_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vmadd_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vmadd_vv_u64m8_mu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vmadd_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vmadd_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vmadd_vx_u64m8_mu(vm, vd, rs1, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmax.c b/auto-generated/policy_funcs/llvm-api-tests/vmax.c index 2d4018a4f..48deb0d19 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmax.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmax.c @@ -5,706 +5,891 @@ #include <riscv_vector.h> -vint8mf8_t test_vmax_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmax_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vmax_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vmax_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmax_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmax_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vmax_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmax_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vmax_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmax_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmax_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vmax_vv_i8mf2_tu(vint8mf2_t vd,
vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmax_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vmax_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmax_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmax_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmax_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vmax_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmax_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmax_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmax_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vmax_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmax_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmax_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmax_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vmax_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmax_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmax_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmax_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmax_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vmax_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmax_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmax_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vmax_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmax_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vmax_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vmax_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmax_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmax_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vmax_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmax_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vmax_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vmax_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmax_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmax_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vmax_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmax_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vmax_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vmax_vx_i16m1_tu(vint16m1_t vd, 
vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmax_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmax_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vmax_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmax_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vmax_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vmax_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmax_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmax_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vmax_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmax_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vmax_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vmax_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmax_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmax_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vmax_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmax_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vmax_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vmax_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmax_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmax_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vmax_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmax_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vmax_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vmax_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmax_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmax_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vmax_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmax_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vmax_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vmax_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmax_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmax_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vmax_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmax_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vmax_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vmax_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmax_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmax_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vmax_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmax_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vmax_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmax_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmax_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmax_vx_i32m4_tu(vd, 
vs2, rs1, vl); } -vint32m8_t test_vmax_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmax_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vmax_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmax_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmax_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmax_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vmax_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmax_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vmax_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vmax_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmax_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmax_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmax_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmax_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vmax_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmax_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmax_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmax_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmax_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmax_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vmax_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmax_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmax_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmax_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmax_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmax_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vmax_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmax_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmax_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmax_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmax_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmax_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmax_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmax_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmax_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmax_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmax_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmax_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmax_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t 
test_vmax_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmax_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmax_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmax_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmax_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmax_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmax_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmax_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmax_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmax_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmax_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmax_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmax_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmax_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmax_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmax_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmax_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmax_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmax_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmax_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmax_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmax_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmax_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmax_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmax_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmax_vx_i16mf2_tum(vbool32_t vm, 
vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmax_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmax_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmax_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmax_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmax_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmax_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmax_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmax_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmax_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmax_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmax_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmax_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmax_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmax_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmax_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmax_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmax_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmax_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmax_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmax_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmax_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmax_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmax_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmax_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmax_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmax_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmax_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmax_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmax_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmax_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmax_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t 
test_vmax_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmax_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmax_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmax_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmax_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmax_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmax_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmax_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmax_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmax_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmax_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmax_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmax_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmax_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmax_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmax_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmax_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmax_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmax_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmax_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmax_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmax_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmax_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmax_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmax_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmax_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmax_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmax_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmax_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmax_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmax_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmax_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmax_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmax_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmax_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmax_vv_i64m8_tum(vm, vd, vs2, vs1, vl); 
} -vint64m8_t test_vmax_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmax_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmax_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmax_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmax_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmax_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmax_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmax_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmax_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmax_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmax_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmax_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmax_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmax_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmax_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmax_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmax_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmax_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmax_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmax_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmax_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmax_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmax_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmax_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmax_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t 
test_vmax_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmax_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmax_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmax_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmax_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmax_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmax_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmax_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmax_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmax_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmax_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmax_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmax_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmax_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmax_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmax_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmax_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmax_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmax_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmax_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmax_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmax_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmax_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmax_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmax_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmax_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmax_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmax_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmax_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmax_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmax_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { 
return __riscv_vmax_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmax_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmax_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmax_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmax_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmax_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmax_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmax_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmax_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmax_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmax_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmax_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmax_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmax_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmax_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmax_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmax_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmax_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmax_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmax_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmax_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmax_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmax_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmax_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmax_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmax_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmax_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmax_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmax_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmax_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmax_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmax_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t 
test_vmax_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmax_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmax_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmax_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmax_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmax_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmax_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmax_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmax_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmax_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmax_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmax_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmax_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmax_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmax_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmax_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmax_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmax_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmax_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmax_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmax_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmax_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmax_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmax_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmax_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmax_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmax_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmax_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmax_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmax_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmax_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmax_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmax_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmax_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmax_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmax_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { 
+vint8m1_t test_vmax_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmax_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmax_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmax_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmax_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmax_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmax_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmax_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmax_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmax_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmax_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmax_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmax_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmax_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmax_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmax_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmax_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmax_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmax_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmax_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmax_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmax_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmax_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmax_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmax_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmax_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmax_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmax_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmax_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmax_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmax_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmax_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmax_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmax_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmax_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, 
vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmax_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmax_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmax_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmax_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmax_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmax_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmax_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmax_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmax_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmax_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmax_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmax_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmax_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmax_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmax_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmax_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmax_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmax_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmax_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmax_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmax_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmax_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmax_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmax_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmax_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmax_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmax_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmax_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmax_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmax_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmax_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmax_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmax_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t 
vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmax_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmax_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmax_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmax_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmax_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmax_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmax_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmax_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmax_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmax_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmax_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmax_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmax_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmax_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmax_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmax_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmax_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmax_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmax_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmax_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmax_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmax_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmax_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmax_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmax_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmax_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmax_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmax_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmax_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmax_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmax_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmax_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmax_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmaxu.c b/auto-generated/policy_funcs/llvm-api-tests/vmaxu.c index f4228c360..73be8387a 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmaxu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmaxu.c @@ -5,706 +5,939 @@ #include <riscv_vector.h> -vuint8mf8_t test_vmaxu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vmaxu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1,
size_t vl) { return __riscv_vmaxu_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vmaxu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vmaxu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vmaxu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vmaxu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vmaxu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vmaxu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vmaxu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vmaxu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vmaxu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vmaxu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vmaxu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vmaxu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vmaxu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vmaxu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vmaxu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vmaxu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vmaxu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vmaxu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4_tu(vuint16mf4_t vd, 
vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vmaxu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vmaxu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vmaxu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vmaxu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vmaxu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vmaxu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vmaxu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vmaxu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vmaxu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vmaxu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vmaxu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vmaxu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vmaxu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vmaxu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vmaxu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vmaxu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vmaxu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vmaxu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vmaxu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vmaxu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vmaxu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t 
test_vmaxu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vmaxu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vmaxu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vmaxu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vmaxu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vmaxu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vmaxu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vmaxu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vmaxu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vmaxu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vmaxu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vmaxu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vmaxu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vmaxu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmaxu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vmaxu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vmaxu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vmaxu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmaxu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vmaxu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vmaxu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vmaxu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmaxu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vmaxu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vmaxu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8_tu(vuint64m8_t vd, 
vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vmaxu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmaxu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vmaxu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vmaxu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmaxu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vmaxu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmaxu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vmaxu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmaxu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vmaxu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vmaxu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vmaxu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vmaxu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vmaxu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vmaxu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vmaxu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vmaxu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vmaxu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } 
-vuint8m8_t test_vmaxu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vmaxu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vmaxu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vmaxu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vmaxu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vmaxu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vmaxu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vmaxu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vmaxu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vmaxu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vmaxu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vmaxu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vmaxu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, 
size_t vl) { +vuint16m8_t test_vmaxu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vmaxu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vmaxu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vmaxu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vmaxu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vmaxu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vmaxu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vmaxu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vmaxu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vmaxu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vmaxu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vmaxu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vmaxu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, 
vuint64m1_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vmaxu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmaxu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vmaxu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vmaxu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmaxu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vmaxu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vmaxu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmaxu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmaxu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vmaxu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vmaxu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmaxu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmaxu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vmaxu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmaxu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vmaxu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmaxu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vmaxu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmaxu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vmaxu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vmaxu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t 
test_vmaxu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vmaxu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vmaxu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vmaxu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vmaxu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vmaxu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vmaxu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vmaxu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vmaxu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vmaxu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vmaxu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vmaxu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vmaxu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vmaxu_vx_u16mf2_tumu(vbool32_t 
vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vmaxu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vmaxu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vmaxu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vmaxu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vmaxu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vmaxu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vmaxu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vmaxu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vmaxu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vmaxu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vmaxu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vmaxu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t 
vl) { return __riscv_vmaxu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vmaxu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vmaxu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vmaxu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vmaxu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vmaxu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vmaxu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmaxu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vmaxu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmaxu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vmaxu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmaxu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmaxu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vmaxu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmaxu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vmaxu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmaxu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmaxu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vmaxu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmaxu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vmaxu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmaxu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t 
test_vmaxu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vmaxu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmaxu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vmaxu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmaxu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmaxu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vmaxu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmaxu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vmaxu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmaxu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vmaxu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmaxu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vmaxu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmaxu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vmaxu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmaxu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vmaxu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmaxu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vmaxu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmaxu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vmaxu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmaxu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vmaxu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmaxu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vmaxu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmaxu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vmaxu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) 
{ return __riscv_vmaxu_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmaxu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vmaxu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmaxu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vmaxu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vmaxu_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmaxu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vmaxu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmaxu_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmaxu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vmaxu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmaxu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vmaxu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmaxu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vmaxu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmaxu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vmaxu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmaxu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vmaxu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmaxu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vmaxu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmaxu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vmaxu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vmaxu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmaxu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vmaxu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmaxu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vmaxu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vmaxu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmaxu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t 
rs1, size_t vl) { +vuint16m4_t test_vmaxu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmaxu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vmaxu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vmaxu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmaxu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vmaxu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmaxu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmaxu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vmaxu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmaxu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vmaxu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmaxu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmaxu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vmaxu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmaxu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vmaxu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmaxu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vmaxu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vmaxu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmaxu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vmaxu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmaxu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vmaxu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vmaxu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmaxu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vmaxu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmaxu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmaxu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vmaxu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vmaxu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmaxu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vmaxu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return 
__riscv_vmaxu_vx_u32m8_mu(vm, vd, vs2, rs1, vl);
}

-vuint64m1_t test_vmaxu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+vuint64m1_t test_vmaxu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                   vuint64m1_t vs2, vuint64m1_t vs1,
+                                   size_t vl) {
  return __riscv_vmaxu_vv_u64m1_mu(vm, vd, vs2, vs1, vl);
}

-vuint64m1_t test_vmaxu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+vuint64m1_t test_vmaxu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                   vuint64m1_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vmaxu_vx_u64m1_mu(vm, vd, vs2, rs1, vl);
}

-vuint64m2_t test_vmaxu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+vuint64m2_t test_vmaxu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                   vuint64m2_t vs2, vuint64m2_t vs1,
+                                   size_t vl) {
  return __riscv_vmaxu_vv_u64m2_mu(vm, vd, vs2, vs1, vl);
}

-vuint64m2_t test_vmaxu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+vuint64m2_t test_vmaxu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                   vuint64m2_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vmaxu_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
}

-vuint64m4_t test_vmaxu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+vuint64m4_t test_vmaxu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                   vuint64m4_t vs2, vuint64m4_t vs1,
+                                   size_t vl) {
  return __riscv_vmaxu_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
}

-vuint64m4_t test_vmaxu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+vuint64m4_t test_vmaxu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                   vuint64m4_t vs2, uint64_t rs1, size_t vl) {
  return __riscv_vmaxu_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
}

-vuint64m8_t test_vmaxu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+vuint64m8_t test_vmaxu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2,
+                                   vuint64m8_t vs1, size_t vl) {
  return __riscv_vmaxu_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
}

-vuint64m8_t test_vmaxu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+vuint64m8_t test_vmaxu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2,
+                                   uint64_t rs1, size_t vl) {
  return __riscv_vmaxu_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
}
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmerge.c b/auto-generated/policy_funcs/llvm-api-tests/vmerge.c
index 4cb59d966..2c399bce7 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vmerge.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vmerge.c
@@ -1,419 +1,543 @@
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

-vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) {
+vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2,
+                                    vint8mf8_t vs1, vbool64_t v0, size_t vl) {
  return __riscv_vmerge_vvm_i8mf8_tu(vd, vs2, vs1, v0, vl);
}

-vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) {
+vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1,
+                                    vbool64_t v0, size_t vl) {
  return __riscv_vmerge_vxm_i8mf8_tu(vd, vs2, rs1, v0, vl);
}

-vint8mf4_t
test_vmerge_vvm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { +vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, vbool32_t v0, size_t vl) { return __riscv_vmerge_vvm_i8mf4_tu(vd, vs2, vs1, v0, vl); } -vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { +vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + vbool32_t v0, size_t vl) { return __riscv_vmerge_vxm_i8mf4_tu(vd, vs2, rs1, v0, vl); } -vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { +vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, vbool16_t v0, size_t vl) { return __riscv_vmerge_vvm_i8mf2_tu(vd, vs2, vs1, v0, vl); } -vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) { +vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vmerge_vxm_i8mf2_tu(vd, vs2, rs1, v0, vl); } -vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { +vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + vbool8_t v0, size_t vl) { return __riscv_vmerge_vvm_i8m1_tu(vd, vs2, vs1, v0, vl); } -vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { +vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vmerge_vxm_i8m1_tu(vd, vs2, rs1, v0, vl); } -vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { +vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + vbool4_t v0, size_t vl) { return __riscv_vmerge_vvm_i8m2_tu(vd, vs2, vs1, v0, vl); } -vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { +vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + vbool4_t v0, size_t vl) { return __riscv_vmerge_vxm_i8m2_tu(vd, vs2, rs1, v0, vl); } -vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { +vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + vbool2_t v0, size_t vl) { return __riscv_vmerge_vvm_i8m4_tu(vd, vs2, vs1, v0, vl); } -vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { +vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + vbool2_t v0, size_t vl) { return __riscv_vmerge_vxm_i8m4_tu(vd, vs2, rs1, v0, vl); } -vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { +vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + vbool1_t v0, size_t vl) { return __riscv_vmerge_vvm_i8m8_tu(vd, vs2, vs1, v0, vl); } -vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { +vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + vbool1_t v0, size_t vl) { return __riscv_vmerge_vxm_i8m8_tu(vd, vs2, rs1, v0, vl); } -vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { +vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, vbool64_t v0, + size_t vl) { return __riscv_vmerge_vvm_i16mf4_tu(vd, vs2, vs1, v0, vl); } 
-vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { +vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + int16_t rs1, vbool64_t v0, size_t vl) { return __riscv_vmerge_vxm_i16mf4_tu(vd, vs2, rs1, v0, vl); } -vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { +vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, vbool32_t v0, + size_t vl) { return __riscv_vmerge_vvm_i16mf2_tu(vd, vs2, vs1, v0, vl); } -vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { +vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + int16_t rs1, vbool32_t v0, size_t vl) { return __riscv_vmerge_vxm_i16mf2_tu(vd, vs2, rs1, v0, vl); } -vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { +vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, vbool16_t v0, size_t vl) { return __riscv_vmerge_vvm_i16m1_tu(vd, vs2, vs1, v0, vl); } -vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { +vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vmerge_vxm_i16m1_tu(vd, vs2, rs1, v0, vl); } -vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { +vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, vbool8_t v0, size_t vl) { return __riscv_vmerge_vvm_i16m2_tu(vd, vs2, vs1, v0, vl); } -vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { +vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vmerge_vxm_i16m2_tu(vd, vs2, rs1, v0, vl); } -vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { +vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, vbool4_t v0, size_t vl) { return __riscv_vmerge_vvm_i16m4_tu(vd, vs2, vs1, v0, vl); } -vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { +vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + vbool4_t v0, size_t vl) { return __riscv_vmerge_vxm_i16m4_tu(vd, vs2, rs1, v0, vl); } -vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { +vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, vbool2_t v0, size_t vl) { return __riscv_vmerge_vvm_i16m8_tu(vd, vs2, vs1, v0, vl); } -vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { +vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + vbool2_t v0, size_t vl) { return __riscv_vmerge_vxm_i16m8_tu(vd, vs2, rs1, v0, vl); } -vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) { +vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, vbool64_t v0, + size_t vl) { return __riscv_vmerge_vvm_i32mf2_tu(vd, vs2, vs1, v0, vl); } -vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { +vint32mf2_t 
test_vmerge_vxm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int32_t rs1, vbool64_t v0, size_t vl) { return __riscv_vmerge_vxm_i32mf2_tu(vd, vs2, rs1, v0, vl); } -vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { +vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, vbool32_t v0, size_t vl) { return __riscv_vmerge_vvm_i32m1_tu(vd, vs2, vs1, v0, vl); } -vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { +vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + vbool32_t v0, size_t vl) { return __riscv_vmerge_vxm_i32m1_tu(vd, vs2, rs1, v0, vl); } -vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { +vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, vbool16_t v0, size_t vl) { return __riscv_vmerge_vvm_i32m2_tu(vd, vs2, vs1, v0, vl); } -vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { +vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vmerge_vxm_i32m2_tu(vd, vs2, rs1, v0, vl); } -vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { +vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, vbool8_t v0, size_t vl) { return __riscv_vmerge_vvm_i32m4_tu(vd, vs2, vs1, v0, vl); } -vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { +vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vmerge_vxm_i32m4_tu(vd, vs2, rs1, v0, vl); } -vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { +vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, vbool4_t v0, size_t vl) { return __riscv_vmerge_vvm_i32m8_tu(vd, vs2, vs1, v0, vl); } -vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { +vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + vbool4_t v0, size_t vl) { return __riscv_vmerge_vxm_i32m8_tu(vd, vs2, rs1, v0, vl); } -vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { +vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, vbool64_t v0, size_t vl) { return __riscv_vmerge_vvm_i64m1_tu(vd, vs2, vs1, v0, vl); } -vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { +vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + vbool64_t v0, size_t vl) { return __riscv_vmerge_vxm_i64m1_tu(vd, vs2, rs1, v0, vl); } -vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { +vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, vbool32_t v0, size_t vl) { return __riscv_vmerge_vvm_i64m2_tu(vd, vs2, vs1, v0, vl); } -vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { +vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + vbool32_t v0, size_t vl) { return __riscv_vmerge_vxm_i64m2_tu(vd, vs2, rs1, v0, vl); } -vint64m4_t 
test_vmerge_vvm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { +vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, vbool16_t v0, size_t vl) { return __riscv_vmerge_vvm_i64m4_tu(vd, vs2, vs1, v0, vl); } -vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { +vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vmerge_vxm_i64m4_tu(vd, vs2, rs1, v0, vl); } -vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { +vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, vbool8_t v0, size_t vl) { return __riscv_vmerge_vvm_i64m8_tu(vd, vs2, vs1, v0, vl); } -vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { +vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vmerge_vxm_i64m8_tu(vd, vs2, rs1, v0, vl); } -vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { +vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, vbool64_t v0, size_t vl) { return __riscv_vmerge_vvm_u8mf8_tu(vd, vs2, vs1, v0, vl); } -vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { +vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, vbool64_t v0, size_t vl) { return __riscv_vmerge_vxm_u8mf8_tu(vd, vs2, rs1, v0, vl); } -vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { +vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, vbool32_t v0, size_t vl) { return __riscv_vmerge_vvm_u8mf4_tu(vd, vs2, vs1, v0, vl); } -vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { +vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, vbool32_t v0, size_t vl) { return __riscv_vmerge_vxm_u8mf4_tu(vd, vs2, rs1, v0, vl); } -vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { +vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, vbool16_t v0, size_t vl) { return __riscv_vmerge_vvm_u8mf2_tu(vd, vs2, vs1, v0, vl); } -vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { +vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, vbool16_t v0, size_t vl) { return __riscv_vmerge_vxm_u8mf2_tu(vd, vs2, rs1, v0, vl); } -vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { +vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, vbool8_t v0, size_t vl) { return __riscv_vmerge_vvm_u8m1_tu(vd, vs2, vs1, v0, vl); } -vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { +vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vmerge_vxm_u8m1_tu(vd, vs2, rs1, v0, vl); } -vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { +vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t vd, 
vuint8m2_t vs2, + vuint8m2_t vs1, vbool4_t v0, size_t vl) { return __riscv_vmerge_vvm_u8m2_tu(vd, vs2, vs1, v0, vl); } -vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { +vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + vbool4_t v0, size_t vl) { return __riscv_vmerge_vxm_u8m2_tu(vd, vs2, rs1, v0, vl); } -vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { +vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, vbool2_t v0, size_t vl) { return __riscv_vmerge_vvm_u8m4_tu(vd, vs2, vs1, v0, vl); } -vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { +vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + vbool2_t v0, size_t vl) { return __riscv_vmerge_vxm_u8m4_tu(vd, vs2, rs1, v0, vl); } -vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { +vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, vbool1_t v0, size_t vl) { return __riscv_vmerge_vvm_u8m8_tu(vd, vs2, vs1, v0, vl); } -vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { +vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + vbool1_t v0, size_t vl) { return __riscv_vmerge_vxm_u8m8_tu(vd, vs2, rs1, v0, vl); } -vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { +vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, vbool64_t v0, + size_t vl) { return __riscv_vmerge_vvm_u16mf4_tu(vd, vs2, vs1, v0, vl); } -vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { +vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, vbool64_t v0, size_t vl) { return __riscv_vmerge_vxm_u16mf4_tu(vd, vs2, rs1, v0, vl); } -vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { +vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, vbool32_t v0, + size_t vl) { return __riscv_vmerge_vvm_u16mf2_tu(vd, vs2, vs1, v0, vl); } -vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { +vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, vbool32_t v0, size_t vl) { return __riscv_vmerge_vxm_u16mf2_tu(vd, vs2, rs1, v0, vl); } -vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { +vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, vbool16_t v0, size_t vl) { return __riscv_vmerge_vvm_u16m1_tu(vd, vs2, vs1, v0, vl); } -vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { +vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, vbool16_t v0, size_t vl) { return __riscv_vmerge_vxm_u16m1_tu(vd, vs2, rs1, v0, vl); } -vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) { +vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, vbool8_t v0, size_t vl) { return __riscv_vmerge_vvm_u16m2_tu(vd, vs2, vs1, 
v0, vl); } -vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { +vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, vbool8_t v0, size_t vl) { return __riscv_vmerge_vxm_u16m2_tu(vd, vs2, rs1, v0, vl); } -vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { +vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, vbool4_t v0, size_t vl) { return __riscv_vmerge_vvm_u16m4_tu(vd, vs2, vs1, v0, vl); } -vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { +vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, vbool4_t v0, size_t vl) { return __riscv_vmerge_vxm_u16m4_tu(vd, vs2, rs1, v0, vl); } -vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { +vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, vbool2_t v0, size_t vl) { return __riscv_vmerge_vvm_u16m8_tu(vd, vs2, vs1, v0, vl); } -vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) { +vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, vbool2_t v0, size_t vl) { return __riscv_vmerge_vxm_u16m8_tu(vd, vs2, rs1, v0, vl); } -vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) { +vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, vbool64_t v0, + size_t vl) { return __riscv_vmerge_vvm_u32mf2_tu(vd, vs2, vs1, v0, vl); } -vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { +vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, vbool64_t v0, size_t vl) { return __riscv_vmerge_vxm_u32mf2_tu(vd, vs2, rs1, v0, vl); } -vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { +vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, vbool32_t v0, size_t vl) { return __riscv_vmerge_vvm_u32m1_tu(vd, vs2, vs1, v0, vl); } -vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { +vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, vbool32_t v0, size_t vl) { return __riscv_vmerge_vxm_u32m1_tu(vd, vs2, rs1, v0, vl); } -vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { +vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, vbool16_t v0, size_t vl) { return __riscv_vmerge_vvm_u32m2_tu(vd, vs2, vs1, v0, vl); } -vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { +vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, vbool16_t v0, size_t vl) { return __riscv_vmerge_vxm_u32m2_tu(vd, vs2, rs1, v0, vl); } -vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { +vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, vbool8_t v0, size_t vl) { return __riscv_vmerge_vvm_u32m4_tu(vd, vs2, vs1, v0, vl); } -vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t vd, vuint32m4_t 
vs2, uint32_t rs1, vbool8_t v0, size_t vl) { +vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, vbool8_t v0, size_t vl) { return __riscv_vmerge_vxm_u32m4_tu(vd, vs2, rs1, v0, vl); } -vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) { +vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, vbool4_t v0, size_t vl) { return __riscv_vmerge_vvm_u32m8_tu(vd, vs2, vs1, v0, vl); } -vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) { +vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, vbool4_t v0, size_t vl) { return __riscv_vmerge_vxm_u32m8_tu(vd, vs2, rs1, v0, vl); } -vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { +vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, vbool64_t v0, size_t vl) { return __riscv_vmerge_vvm_u64m1_tu(vd, vs2, vs1, v0, vl); } -vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { +vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, vbool64_t v0, size_t vl) { return __riscv_vmerge_vxm_u64m1_tu(vd, vs2, rs1, v0, vl); } -vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { +vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, vbool32_t v0, size_t vl) { return __riscv_vmerge_vvm_u64m2_tu(vd, vs2, vs1, v0, vl); } -vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { +vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, vbool32_t v0, size_t vl) { return __riscv_vmerge_vxm_u64m2_tu(vd, vs2, rs1, v0, vl); } -vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { +vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, vbool16_t v0, size_t vl) { return __riscv_vmerge_vvm_u64m4_tu(vd, vs2, vs1, v0, vl); } -vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) { +vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, vbool16_t v0, size_t vl) { return __riscv_vmerge_vxm_u64m4_tu(vd, vs2, rs1, v0, vl); } -vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) { +vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, vbool8_t v0, size_t vl) { return __riscv_vmerge_vvm_u64m8_tu(vd, vs2, vs1, v0, vl); } -vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) { +vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, vbool8_t v0, size_t vl) { return __riscv_vmerge_vxm_u64m8_tu(vd, vs2, rs1, v0, vl); } -vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, vbool64_t v0, size_t vl) { +vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vfloat16mf4_t vs1, vbool64_t v0, + size_t vl) { return __riscv_vmerge_vvm_f16mf4_tu(vd, vs2, vs1, v0, vl); } -vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, vbool32_t v0, size_t vl) { +vfloat16mf2_t 
test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vfloat16mf2_t vs1, vbool32_t v0, + size_t vl) { return __riscv_vmerge_vvm_f16mf2_tu(vd, vs2, vs1, v0, vl); } -vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, vbool16_t v0, size_t vl) { +vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vfloat16m1_t vs1, vbool16_t v0, + size_t vl) { return __riscv_vmerge_vvm_f16m1_tu(vd, vs2, vs1, v0, vl); } -vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, vbool8_t v0, size_t vl) { +vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, vbool8_t v0, + size_t vl) { return __riscv_vmerge_vvm_f16m2_tu(vd, vs2, vs1, v0, vl); } -vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, vbool4_t v0, size_t vl) { +vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, vbool4_t v0, + size_t vl) { return __riscv_vmerge_vvm_f16m4_tu(vd, vs2, vs1, v0, vl); } -vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, vbool2_t v0, size_t vl) { +vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, vbool2_t v0, + size_t vl) { return __riscv_vmerge_vvm_f16m8_tu(vd, vs2, vs1, v0, vl); } -vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, vbool64_t v0, size_t vl) { +vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vfloat32mf2_t vs1, vbool64_t v0, + size_t vl) { return __riscv_vmerge_vvm_f32mf2_tu(vd, vs2, vs1, v0, vl); } -vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, vbool32_t v0, size_t vl) { +vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vfloat32m1_t vs1, vbool32_t v0, + size_t vl) { return __riscv_vmerge_vvm_f32m1_tu(vd, vs2, vs1, v0, vl); } -vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, vbool16_t v0, size_t vl) { +vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vfloat32m2_t vs1, vbool16_t v0, + size_t vl) { return __riscv_vmerge_vvm_f32m2_tu(vd, vs2, vs1, v0, vl); } -vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, vbool8_t v0, size_t vl) { +vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, vbool8_t v0, + size_t vl) { return __riscv_vmerge_vvm_f32m4_tu(vd, vs2, vs1, v0, vl); } -vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, vbool4_t v0, size_t vl) { +vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, vbool4_t v0, + size_t vl) { return __riscv_vmerge_vvm_f32m8_tu(vd, vs2, vs1, v0, vl); } -vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, vbool64_t v0, size_t vl) { +vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vfloat64m1_t vs1, vbool64_t v0, + size_t vl) { return __riscv_vmerge_vvm_f64m1_tu(vd, vs2, vs1, v0, vl); } -vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, vbool32_t v0, size_t vl) { +vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vfloat64m2_t vs1, vbool32_t v0, + size_t vl) { return __riscv_vmerge_vvm_f64m2_tu(vd, vs2, vs1, v0, vl); } -vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t vd, 
vfloat64m4_t vs2, vfloat64m4_t vs1, vbool16_t v0, size_t vl) { +vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vfloat64m4_t vs1, vbool16_t v0, + size_t vl) { return __riscv_vmerge_vvm_f64m4_tu(vd, vs2, vs1, v0, vl); } -vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, vbool8_t v0, size_t vl) { +vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, vbool8_t v0, + size_t vl) { return __riscv_vmerge_vvm_f64m8_tu(vd, vs2, vs1, v0, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmfeq.c b/auto-generated/policy_funcs/llvm-api-tests/vmfeq.c index 73a0660d9..39a6bbd17 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmfeq.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmfeq.c @@ -1,127 +1,169 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmfeq_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vbool64_t test_vmfeq_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vmfeq_vv_f16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vbool64_t test_vmfeq_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfeq_vf_f16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vbool32_t test_vmfeq_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vmfeq_vv_f16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vbool32_t test_vmfeq_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfeq_vf_f16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vbool16_t test_vmfeq_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vmfeq_vv_f16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vbool16_t test_vmfeq_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfeq_vf_f16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vbool8_t test_vmfeq_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vmfeq_vv_f16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vbool8_t test_vmfeq_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfeq_vf_f16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t
test_vmfeq_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vbool4_t test_vmfeq_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vmfeq_vv_f16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfeq_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vbool4_t test_vmfeq_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfeq_vf_f16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmfeq_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vbool2_t test_vmfeq_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vmfeq_vv_f16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmfeq_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vbool2_t test_vmfeq_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfeq_vf_f16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfeq_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vbool64_t test_vmfeq_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vmfeq_vv_f32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vbool64_t test_vmfeq_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vmfeq_vf_f32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vbool32_t test_vmfeq_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vmfeq_vv_f32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vbool32_t test_vmfeq_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vmfeq_vf_f32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vbool16_t test_vmfeq_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vmfeq_vv_f32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vbool16_t test_vmfeq_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vmfeq_vf_f32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vbool8_t test_vmfeq_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vmfeq_vv_f32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vbool8_t test_vmfeq_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vmfeq_vf_f32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfeq_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vbool4_t 
test_vmfeq_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vmfeq_vv_f32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfeq_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vbool4_t test_vmfeq_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vmfeq_vf_f32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfeq_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vbool64_t test_vmfeq_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vmfeq_vv_f64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfeq_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vbool64_t test_vmfeq_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vmfeq_vf_f64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfeq_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vbool32_t test_vmfeq_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vmfeq_vv_f64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfeq_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vbool32_t test_vmfeq_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vmfeq_vf_f64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfeq_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vbool16_t test_vmfeq_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vmfeq_vv_f64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfeq_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vbool16_t test_vmfeq_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vmfeq_vf_f64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfeq_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vbool8_t test_vmfeq_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vmfeq_vv_f64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfeq_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vbool8_t test_vmfeq_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vmfeq_vf_f64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmfge.c b/auto-generated/policy_funcs/llvm-api-tests/vmfge.c index b4b373374..fe6a9d0f9 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmfge.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmfge.c @@ -1,127 +1,169 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmfge_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vbool64_t test_vmfge_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, +
vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vmfge_vv_f16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vbool64_t test_vmfge_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfge_vf_f16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vbool32_t test_vmfge_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vmfge_vv_f16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vbool32_t test_vmfge_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfge_vf_f16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vbool16_t test_vmfge_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vmfge_vv_f16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vbool16_t test_vmfge_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfge_vf_f16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vbool8_t test_vmfge_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vmfge_vv_f16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vbool8_t test_vmfge_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfge_vf_f16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfge_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vbool4_t test_vmfge_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vmfge_vv_f16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfge_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vbool4_t test_vmfge_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfge_vf_f16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmfge_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vbool2_t test_vmfge_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vmfge_vv_f16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmfge_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vbool2_t test_vmfge_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfge_vf_f16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfge_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vbool64_t test_vmfge_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return 
__riscv_vmfge_vv_f32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vbool64_t test_vmfge_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vmfge_vf_f32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vbool32_t test_vmfge_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vmfge_vv_f32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vbool32_t test_vmfge_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vmfge_vf_f32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vbool16_t test_vmfge_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vmfge_vv_f32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vbool16_t test_vmfge_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vmfge_vf_f32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vbool8_t test_vmfge_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vmfge_vv_f32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vbool8_t test_vmfge_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vmfge_vf_f32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfge_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vbool4_t test_vmfge_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vmfge_vv_f32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfge_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vbool4_t test_vmfge_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vmfge_vf_f32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfge_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vbool64_t test_vmfge_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vmfge_vv_f64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfge_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vbool64_t test_vmfge_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vmfge_vf_f64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfge_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vbool32_t test_vmfge_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vmfge_vv_f64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfge_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, 
vfloat64m2_t vs2, double rs1, size_t vl) { +vbool32_t test_vmfge_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vmfge_vf_f64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfge_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vbool16_t test_vmfge_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vmfge_vv_f64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfge_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vbool16_t test_vmfge_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vmfge_vf_f64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfge_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vbool8_t test_vmfge_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vmfge_vv_f64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfge_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vbool8_t test_vmfge_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vmfge_vf_f64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmfgt.c b/auto-generated/policy_funcs/llvm-api-tests/vmfgt.c index 30366708f..f30fee119 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmfgt.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmfgt.c @@ -1,127 +1,169 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmfgt_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vbool64_t test_vmfgt_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vmfgt_vv_f16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vbool64_t test_vmfgt_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfgt_vf_f16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vbool32_t test_vmfgt_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vmfgt_vv_f16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vbool32_t test_vmfgt_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfgt_vf_f16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vbool16_t test_vmfgt_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vmfgt_vv_f16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2,
_Float16 rs1, size_t vl) { +vbool16_t test_vmfgt_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfgt_vf_f16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vbool8_t test_vmfgt_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vmfgt_vv_f16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vbool8_t test_vmfgt_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfgt_vf_f16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfgt_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vbool4_t test_vmfgt_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vmfgt_vv_f16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfgt_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vbool4_t test_vmfgt_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfgt_vf_f16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmfgt_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vbool2_t test_vmfgt_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vmfgt_vv_f16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmfgt_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vbool2_t test_vmfgt_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfgt_vf_f16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfgt_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vbool64_t test_vmfgt_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vmfgt_vv_f32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vbool64_t test_vmfgt_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vmfgt_vf_f32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vbool32_t test_vmfgt_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vmfgt_vv_f32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vbool32_t test_vmfgt_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vmfgt_vf_f32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vbool16_t test_vmfgt_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vmfgt_vv_f32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vbool16_t test_vmfgt_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat32m2_t vs2, float rs1, 
size_t vl) { return __riscv_vmfgt_vf_f32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vbool8_t test_vmfgt_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vmfgt_vv_f32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vbool8_t test_vmfgt_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vmfgt_vf_f32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfgt_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vbool4_t test_vmfgt_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vmfgt_vv_f32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfgt_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vbool4_t test_vmfgt_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vmfgt_vf_f32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfgt_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vbool64_t test_vmfgt_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vmfgt_vv_f64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfgt_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vbool64_t test_vmfgt_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vmfgt_vf_f64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfgt_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vbool32_t test_vmfgt_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vmfgt_vv_f64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfgt_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vbool32_t test_vmfgt_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vmfgt_vf_f64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfgt_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vbool16_t test_vmfgt_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vmfgt_vv_f64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfgt_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vbool16_t test_vmfgt_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vmfgt_vf_f64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfgt_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vbool8_t test_vmfgt_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vmfgt_vv_f64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfgt_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vbool8_t test_vmfgt_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vmfgt_vf_f64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmfle.c 
b/auto-generated/policy_funcs/llvm-api-tests/vmfle.c index c1900e963..e19fce811 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmfle.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmfle.c @@ -1,127 +1,169 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmfle_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vbool64_t test_vmfle_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vmfle_vv_f16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfle_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vbool64_t test_vmfle_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfle_vf_f16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfle_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vbool32_t test_vmfle_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vmfle_vv_f16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfle_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vbool32_t test_vmfle_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfle_vf_f16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfle_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vbool16_t test_vmfle_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vmfle_vv_f16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfle_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vbool16_t test_vmfle_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfle_vf_f16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfle_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vbool8_t test_vmfle_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vmfle_vv_f16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfle_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vbool8_t test_vmfle_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfle_vf_f16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfle_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vbool4_t test_vmfle_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vmfle_vv_f16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfle_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vbool4_t test_vmfle_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfle_vf_f16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmfle_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2,
vfloat16m8_t vs1, size_t vl) { +vbool2_t test_vmfle_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vmfle_vv_f16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmfle_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vbool2_t test_vmfle_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfle_vf_f16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfle_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vbool64_t test_vmfle_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vmfle_vv_f32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfle_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vbool64_t test_vmfle_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vmfle_vf_f32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfle_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vbool32_t test_vmfle_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vmfle_vv_f32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfle_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vbool32_t test_vmfle_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vmfle_vf_f32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfle_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vbool16_t test_vmfle_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vmfle_vv_f32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfle_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vbool16_t test_vmfle_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vmfle_vf_f32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfle_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vbool8_t test_vmfle_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vmfle_vv_f32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfle_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vbool8_t test_vmfle_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vmfle_vf_f32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfle_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vbool4_t test_vmfle_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vmfle_vv_f32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfle_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vbool4_t test_vmfle_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vmfle_vf_f32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfle_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vbool64_t test_vmfle_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + 
size_t vl) { return __riscv_vmfle_vv_f64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfle_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vbool64_t test_vmfle_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vmfle_vf_f64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfle_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vbool32_t test_vmfle_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vmfle_vv_f64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfle_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vbool32_t test_vmfle_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vmfle_vf_f64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfle_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vbool16_t test_vmfle_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vmfle_vv_f64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfle_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vbool16_t test_vmfle_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vmfle_vf_f64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfle_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vbool8_t test_vmfle_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vmfle_vv_f64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfle_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vbool8_t test_vmfle_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vmfle_vf_f64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmflt.c b/auto-generated/policy_funcs/llvm-api-tests/vmflt.c index d02294919..847299193 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmflt.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmflt.c @@ -1,127 +1,169 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmflt_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vbool64_t test_vmflt_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vmflt_vv_f16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vbool64_t test_vmflt_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmflt_vf_f16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vbool32_t test_vmflt_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return
__riscv_vmflt_vv_f16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vbool32_t test_vmflt_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmflt_vf_f16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vbool16_t test_vmflt_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vmflt_vv_f16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vbool16_t test_vmflt_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmflt_vf_f16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vbool8_t test_vmflt_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vmflt_vv_f16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vbool8_t test_vmflt_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmflt_vf_f16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmflt_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vbool4_t test_vmflt_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vmflt_vv_f16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmflt_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vbool4_t test_vmflt_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmflt_vf_f16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmflt_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vbool2_t test_vmflt_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vmflt_vv_f16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmflt_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vbool2_t test_vmflt_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmflt_vf_f16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmflt_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vbool64_t test_vmflt_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vmflt_vv_f32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vbool64_t test_vmflt_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vmflt_vf_f32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vbool32_t test_vmflt_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vmflt_vv_f32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f32m1_b32_mu(vbool32_t vm, 
vbool32_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vbool32_t test_vmflt_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vmflt_vf_f32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vbool16_t test_vmflt_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vmflt_vv_f32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vbool16_t test_vmflt_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vmflt_vf_f32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vbool8_t test_vmflt_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vmflt_vv_f32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vbool8_t test_vmflt_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vmflt_vf_f32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmflt_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vbool4_t test_vmflt_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vmflt_vv_f32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmflt_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vbool4_t test_vmflt_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vmflt_vf_f32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmflt_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vbool64_t test_vmflt_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vmflt_vv_f64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmflt_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vbool64_t test_vmflt_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vmflt_vf_f64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmflt_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vbool32_t test_vmflt_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vmflt_vv_f64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmflt_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vbool32_t test_vmflt_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vmflt_vf_f64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmflt_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vbool16_t test_vmflt_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vmflt_vv_f64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmflt_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vbool16_t test_vmflt_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, + 
vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vmflt_vf_f64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmflt_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vbool8_t test_vmflt_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vmflt_vv_f64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmflt_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vbool8_t test_vmflt_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vmflt_vf_f64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmfne.c b/auto-generated/policy_funcs/llvm-api-tests/vmfne.c index 142d24a82..8a38e2fff 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmfne.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmfne.c @@ -1,127 +1,169 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmfne_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, vfloat16mf4_t vs1, size_t vl) { +vbool64_t test_vmfne_vv_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat16mf4_t vs2, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vmfne_vv_f16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vfloat16mf4_t vs2, _Float16 rs1, size_t vl) { +vbool64_t test_vmfne_vf_f16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat16mf4_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfne_vf_f16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, vfloat16mf2_t vs1, size_t vl) { +vbool32_t test_vmfne_vv_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat16mf2_t vs2, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vmfne_vv_f16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat16mf2_t vs2, _Float16 rs1, size_t vl) { +vbool32_t test_vmfne_vf_f16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat16mf2_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfne_vf_f16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) { +vbool16_t test_vmfne_vv_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat16m1_t vs2, vfloat16m1_t vs1, + size_t vl) { return __riscv_vmfne_vv_f16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, vfloat16m1_t vs2, _Float16 rs1, size_t vl) { +vbool16_t test_vmfne_vf_f16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat16m1_t vs2, _Float16 rs1, + size_t vl) { return __riscv_vmfne_vf_f16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) { +vbool8_t test_vmfne_vv_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, + vfloat16m2_t vs1, size_t vl) { return __riscv_vmfne_vv_f16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, _Float16 rs1, size_t vl) { +vbool8_t test_vmfne_vf_f16m2_b8_mu(vbool8_t vm, vbool8_t vd, vfloat16m2_t vs2, + _Float16
rs1, size_t vl) { return __riscv_vmfne_vf_f16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfne_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) { +vbool4_t test_vmfne_vv_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, + vfloat16m4_t vs1, size_t vl) { return __riscv_vmfne_vv_f16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfne_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, _Float16 rs1, size_t vl) { +vbool4_t test_vmfne_vf_f16m4_b4_mu(vbool4_t vm, vbool4_t vd, vfloat16m4_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfne_vf_f16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmfne_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, vfloat16m8_t vs1, size_t vl) { +vbool2_t test_vmfne_vv_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, + vfloat16m8_t vs1, size_t vl) { return __riscv_vmfne_vv_f16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmfne_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, _Float16 rs1, size_t vl) { +vbool2_t test_vmfne_vf_f16m8_b2_mu(vbool2_t vm, vbool2_t vd, vfloat16m8_t vs2, + _Float16 rs1, size_t vl) { return __riscv_vmfne_vf_f16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfne_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, vfloat32mf2_t vs1, size_t vl) { +vbool64_t test_vmfne_vv_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat32mf2_t vs2, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vmfne_vv_f32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vfloat32mf2_t vs2, float rs1, size_t vl) { +vbool64_t test_vmfne_vf_f32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat32mf2_t vs2, float rs1, size_t vl) { return __riscv_vmfne_vf_f32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) { +vbool32_t test_vmfne_vv_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat32m1_t vs2, vfloat32m1_t vs1, + size_t vl) { return __riscv_vmfne_vv_f32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, vfloat32m1_t vs2, float rs1, size_t vl) { +vbool32_t test_vmfne_vf_f32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat32m1_t vs2, float rs1, size_t vl) { return __riscv_vmfne_vf_f32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) { +vbool16_t test_vmfne_vv_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat32m2_t vs2, vfloat32m2_t vs1, + size_t vl) { return __riscv_vmfne_vv_f32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, vfloat32m2_t vs2, float rs1, size_t vl) { +vbool16_t test_vmfne_vf_f32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat32m2_t vs2, float rs1, size_t vl) { return __riscv_vmfne_vf_f32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) { +vbool8_t test_vmfne_vv_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, + vfloat32m4_t vs1, size_t vl) { return __riscv_vmfne_vv_f32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, float rs1, size_t vl) { +vbool8_t test_vmfne_vf_f32m4_b8_mu(vbool8_t vm, vbool8_t vd, vfloat32m4_t vs2, + float rs1, size_t vl) { return __riscv_vmfne_vf_f32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmfne_vv_f32m8_b4_mu(vbool4_t vm, 
vbool4_t vd, vfloat32m8_t vs2, vfloat32m8_t vs1, size_t vl) { +vbool4_t test_vmfne_vv_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, + vfloat32m8_t vs1, size_t vl) { return __riscv_vmfne_vv_f32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmfne_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, float rs1, size_t vl) { +vbool4_t test_vmfne_vf_f32m8_b4_mu(vbool4_t vm, vbool4_t vd, vfloat32m8_t vs2, + float rs1, size_t vl) { return __riscv_vmfne_vf_f32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmfne_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) { +vbool64_t test_vmfne_vv_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat64m1_t vs2, vfloat64m1_t vs1, + size_t vl) { return __riscv_vmfne_vv_f64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmfne_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, vfloat64m1_t vs2, double rs1, size_t vl) { +vbool64_t test_vmfne_vf_f64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vfloat64m1_t vs2, double rs1, size_t vl) { return __riscv_vmfne_vf_f64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmfne_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) { +vbool32_t test_vmfne_vv_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat64m2_t vs2, vfloat64m2_t vs1, + size_t vl) { return __riscv_vmfne_vv_f64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmfne_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, vfloat64m2_t vs2, double rs1, size_t vl) { +vbool32_t test_vmfne_vf_f64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vfloat64m2_t vs2, double rs1, size_t vl) { return __riscv_vmfne_vf_f64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmfne_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) { +vbool16_t test_vmfne_vv_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat64m4_t vs2, vfloat64m4_t vs1, + size_t vl) { return __riscv_vmfne_vv_f64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmfne_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, vfloat64m4_t vs2, double rs1, size_t vl) { +vbool16_t test_vmfne_vf_f64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vfloat64m4_t vs2, double rs1, size_t vl) { return __riscv_vmfne_vf_f64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmfne_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, vfloat64m8_t vs1, size_t vl) { +vbool8_t test_vmfne_vv_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, + vfloat64m8_t vs1, size_t vl) { return __riscv_vmfne_vv_f64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmfne_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, double rs1, size_t vl) { +vbool8_t test_vmfne_vf_f64m8_b8_mu(vbool8_t vm, vbool8_t vd, vfloat64m8_t vs2, + double rs1, size_t vl) { return __riscv_vmfne_vf_f64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmin.c b/auto-generated/policy_funcs/llvm-api-tests/vmin.c index 1b3d1ed4e..9f9987f03 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmin.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmin.c @@ -5,706 +5,891 @@ #include <riscv_vector.h> -vint8mf8_t test_vmin_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmin_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vmin_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vmin_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmin_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmin_vx_i8mf8_tu(vd, vs2,
rs1, vl); } -vint8mf4_t test_vmin_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmin_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vmin_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vmin_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmin_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmin_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vmin_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmin_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vmin_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vmin_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmin_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmin_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vmin_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmin_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vmin_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmin_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmin_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmin_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmin_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmin_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vmin_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmin_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmin_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmin_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmin_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmin_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vmin_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmin_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmin_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmin_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmin_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmin_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vmin_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmin_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmin_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmin_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vmin_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmin_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vmin_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vmin_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmin_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmin_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vmin_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmin_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vmin_vv_i16mf2_tu(vd, vs2, vs1, vl); } 
-vint16mf2_t test_vmin_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmin_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmin_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vmin_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmin_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vmin_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vmin_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmin_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmin_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vmin_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmin_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vmin_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vmin_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmin_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmin_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vmin_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmin_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vmin_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vmin_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmin_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmin_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vmin_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmin_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vmin_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vmin_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmin_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmin_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vmin_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmin_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vmin_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vmin_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmin_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmin_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vmin_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmin_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vmin_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vmin_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmin_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmin_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vmin_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmin_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vmin_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vmin_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmin_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t 
rs1, + size_t vl) { return __riscv_vmin_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vmin_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmin_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vmin_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmin_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmin_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmin_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vmin_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmin_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vmin_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmin_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmin_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmin_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vmin_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmin_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vmin_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vmin_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmin_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmin_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmin_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmin_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vmin_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmin_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmin_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmin_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmin_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmin_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vmin_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmin_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmin_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmin_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmin_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmin_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vmin_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmin_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmin_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmin_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmin_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmin_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmin_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmin_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmin_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmin_vv_i8mf4_tum(vbool32_t vm, 
vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmin_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmin_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmin_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmin_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmin_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmin_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmin_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmin_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmin_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmin_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmin_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmin_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmin_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmin_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmin_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmin_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmin_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmin_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmin_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmin_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmin_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmin_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmin_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmin_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmin_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmin_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmin_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmin_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmin_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmin_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmin_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmin_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmin_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { 
+vint16mf4_t test_vmin_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmin_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmin_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmin_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmin_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmin_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmin_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmin_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmin_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmin_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmin_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmin_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmin_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmin_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmin_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmin_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmin_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmin_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmin_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmin_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmin_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmin_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmin_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmin_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmin_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmin_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmin_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmin_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmin_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmin_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmin_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmin_vv_i32m1_tum(vbool32_t vm, 
vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmin_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmin_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmin_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmin_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmin_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmin_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmin_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmin_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmin_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmin_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmin_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmin_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmin_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmin_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmin_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmin_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmin_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmin_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmin_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmin_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmin_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmin_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmin_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmin_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmin_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmin_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmin_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmin_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmin_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmin_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmin_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmin_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmin_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmin_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t 
test_vmin_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmin_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmin_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmin_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmin_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmin_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmin_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmin_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmin_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmin_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmin_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmin_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmin_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmin_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmin_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmin_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmin_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmin_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmin_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmin_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmin_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmin_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmin_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmin_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmin_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmin_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmin_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmin_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmin_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmin_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmin_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmin_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmin_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmin_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t 
test_vmin_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmin_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmin_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmin_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmin_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmin_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmin_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmin_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmin_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmin_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmin_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmin_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmin_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmin_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmin_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmin_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmin_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmin_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmin_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmin_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmin_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmin_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmin_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmin_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmin_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmin_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmin_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmin_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmin_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmin_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmin_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmin_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return 
__riscv_vmin_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmin_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmin_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmin_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmin_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmin_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmin_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmin_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmin_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmin_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmin_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmin_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmin_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmin_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmin_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmin_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmin_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmin_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmin_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmin_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmin_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmin_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmin_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmin_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmin_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmin_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmin_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmin_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmin_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmin_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmin_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmin_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmin_vx_i32m8_tumu(vbool4_t vm, 
vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmin_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmin_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmin_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmin_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmin_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmin_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmin_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmin_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmin_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmin_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmin_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmin_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmin_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmin_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmin_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmin_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmin_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmin_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmin_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmin_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmin_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmin_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmin_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmin_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmin_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmin_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmin_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmin_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmin_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmin_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmin_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmin_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmin_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmin_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmin_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t 
test_vmin_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmin_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmin_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmin_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmin_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmin_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmin_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmin_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmin_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmin_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmin_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmin_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmin_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmin_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmin_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmin_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmin_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmin_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmin_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmin_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmin_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmin_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmin_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmin_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmin_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmin_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmin_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmin_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmin_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmin_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmin_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmin_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmin_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmin_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmin_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, 
size_t vl) { return __riscv_vmin_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmin_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmin_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmin_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmin_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmin_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmin_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmin_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmin_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmin_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmin_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmin_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmin_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmin_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmin_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmin_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmin_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmin_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmin_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmin_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmin_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmin_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmin_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmin_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmin_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmin_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmin_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmin_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmin_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmin_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmin_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmin_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmin_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmin_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, 
size_t vl) { return __riscv_vmin_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmin_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmin_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmin_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmin_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmin_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmin_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmin_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmin_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmin_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmin_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmin_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmin_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmin_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmin_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmin_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmin_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmin_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmin_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmin_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmin_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmin_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmin_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmin_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmin_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmin_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmin_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmin_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmin_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmin_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmin_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmin_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmin_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmin_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmin_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmin_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmin_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return 
__riscv_vmin_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vminu.c b/auto-generated/policy_funcs/llvm-api-tests/vminu.c index 224966b85..adce43777 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vminu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vminu.c @@ -5,706 +5,939 @@ #include <riscv_vector.h> -vuint8mf8_t test_vminu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vminu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vminu_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vminu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vminu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vminu_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vminu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vminu_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vminu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vminu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vminu_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vminu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vminu_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vminu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vminu_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vminu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vminu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vminu_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vminu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vminu_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vminu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vminu_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vminu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vminu_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vminu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vminu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vminu_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vminu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vminu_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vminu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vminu_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t
test_vminu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vminu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vminu_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vminu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vminu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vminu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vminu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vminu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vminu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vminu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vminu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vminu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vminu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vminu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vminu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vminu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vminu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vminu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vminu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vminu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vminu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vminu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vminu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vminu_vv_u32mf2_tu(vd, vs2, vs1, vl); } 
-vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vminu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vminu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vminu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vminu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vminu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vminu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vminu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vminu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vminu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vminu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vminu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vminu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vminu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vminu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vminu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vminu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vminu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vminu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vminu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vminu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vminu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vminu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t 
test_vminu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vminu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vminu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vminu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vminu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vminu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vminu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vminu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vminu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vminu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vminu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vminu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vminu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vminu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vminu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vminu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vminu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vminu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vminu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vminu_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vminu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vminu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vminu_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vminu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } 
-vuint8m4_t test_vminu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vminu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vminu_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vminu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vminu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vminu_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vminu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vminu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vminu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vminu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vminu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vminu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vminu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vminu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vminu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vminu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vminu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vminu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vminu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vminu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vminu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t 
test_vminu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vminu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vminu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vminu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vminu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vminu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vminu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vminu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vminu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vminu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vminu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vminu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vminu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vminu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vminu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vminu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vminu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vminu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vminu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vminu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vminu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t 
vl) { return __riscv_vminu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vminu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vminu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vminu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vminu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vminu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vminu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vminu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vminu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vminu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vminu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vminu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vminu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vminu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vminu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vminu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vminu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vminu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vminu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vminu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vminu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vminu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t 
test_vminu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vminu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vminu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vminu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vminu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vminu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vminu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vminu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vminu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vminu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vminu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vminu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vminu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vminu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vminu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vminu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vminu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vminu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vminu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vminu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vminu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vminu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + 
vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vminu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vminu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vminu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vminu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vminu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vminu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vminu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vminu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vminu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vminu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vminu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vminu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vminu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vminu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vminu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vminu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vminu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vminu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vminu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vminu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vminu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t 
vl) { return __riscv_vminu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vminu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vminu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vminu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vminu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vminu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vminu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vminu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vminu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vminu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vminu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vminu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vminu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vminu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vminu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vminu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vminu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vminu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vminu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t 
test_vminu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vminu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vminu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vminu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vminu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vminu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vminu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vminu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vminu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vminu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vminu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vminu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vminu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vminu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vminu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vminu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vminu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vminu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vminu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vminu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vminu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vminu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vminu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vminu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vminu_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vminu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vminu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vminu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vminu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, 
vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vminu_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vminu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vminu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vminu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vminu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vminu_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vminu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vminu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vminu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vminu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vminu_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vminu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vminu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vminu_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vminu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vminu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vminu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vminu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vminu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vminu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vminu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vminu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vminu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vminu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vminu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vminu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vminu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vminu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vminu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vminu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vminu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vminu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vminu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vminu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vminu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, 
vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vminu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vminu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vminu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vminu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vminu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vminu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vminu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vminu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vminu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vminu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vminu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vminu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vminu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vminu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vminu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vminu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vminu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vminu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vminu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vminu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vminu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vminu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vminu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vminu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vminu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vminu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vminu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vminu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vminu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vminu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vminu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vminu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vminu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, 
size_t vl) { return __riscv_vminu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vminu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vminu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vminu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vminu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vminu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vminu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vminu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vminu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vminu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vminu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vminu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vminu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vminu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vminu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vminu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vminu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vminu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vminu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vminu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vminu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vminu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vminu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vminu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vminu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vminu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vminu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vminu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmsbf.c b/auto-generated/policy_funcs/llvm-api-tests/vmsbf.c index e8fed2f23..a6e4bf118 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmsbf.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmsbf.c @@ -21,14 +21,17 @@ vbool8_t test_vmsbf_m_b8_mu(vbool8_t vm, vbool8_t vd, vbool8_t vs2, size_t vl) { return __riscv_vmsbf_m_b8_mu(vm, vd, vs2, vl); } -vbool16_t test_vmsbf_m_b16_mu(vbool16_t vm, vbool16_t vd, vbool16_t vs2, size_t vl) { +vbool16_t test_vmsbf_m_b16_mu(vbool16_t vm, vbool16_t vd, vbool16_t vs2, + size_t vl) { return __riscv_vmsbf_m_b16_mu(vm, vd, vs2, vl); } -vbool32_t 
test_vmsbf_m_b32_mu(vbool32_t vm, vbool32_t vd, vbool32_t vs2, size_t vl) { +vbool32_t test_vmsbf_m_b32_mu(vbool32_t vm, vbool32_t vd, vbool32_t vs2, + size_t vl) { return __riscv_vmsbf_m_b32_mu(vm, vd, vs2, vl); } -vbool64_t test_vmsbf_m_b64_mu(vbool64_t vm, vbool64_t vd, vbool64_t vs2, size_t vl) { +vbool64_t test_vmsbf_m_b64_mu(vbool64_t vm, vbool64_t vd, vbool64_t vs2, + size_t vl) { return __riscv_vmsbf_m_b64_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmseq.c b/auto-generated/policy_funcs/llvm-api-tests/vmseq.c index 70952eaba..505150283 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmseq.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmseq.c @@ -1,359 +1,465 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmseq_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vbool64_t test_vmseq_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmseq_vv_i8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vbool64_t test_vmseq_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmseq_vx_i8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vbool32_t test_vmseq_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmseq_vv_i8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vbool32_t test_vmseq_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmseq_vx_i8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vbool16_t test_vmseq_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmseq_vv_i8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vbool16_t test_vmseq_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmseq_vx_i8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vbool8_t test_vmseq_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmseq_vv_i8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vbool8_t test_vmseq_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmseq_vx_i8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vbool4_t test_vmseq_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmseq_vv_i8m2_b4_mu(vm, vd, vs2, 
vs1, vl); } -vbool4_t test_vmseq_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vbool4_t test_vmseq_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmseq_vx_i8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vbool2_t test_vmseq_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmseq_vv_i8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vbool2_t test_vmseq_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmseq_vx_i8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmseq_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vbool1_t test_vmseq_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmseq_vv_i8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmseq_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vbool1_t test_vmseq_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmseq_vx_i8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vbool64_t test_vmseq_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmseq_vv_i16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vbool64_t test_vmseq_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmseq_vx_i16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vbool32_t test_vmseq_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmseq_vv_i16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vbool32_t test_vmseq_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmseq_vx_i16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vbool16_t test_vmseq_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmseq_vv_i16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vbool16_t test_vmseq_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmseq_vx_i16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vbool8_t test_vmseq_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmseq_vv_i16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vbool8_t test_vmseq_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) 
{ return __riscv_vmseq_vx_i16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vbool4_t test_vmseq_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmseq_vv_i16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vbool4_t test_vmseq_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmseq_vx_i16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vbool2_t test_vmseq_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmseq_vv_i16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vbool2_t test_vmseq_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmseq_vx_i16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vbool64_t test_vmseq_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmseq_vv_i32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vbool64_t test_vmseq_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmseq_vx_i32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vbool32_t test_vmseq_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmseq_vv_i32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vbool32_t test_vmseq_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmseq_vx_i32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vbool16_t test_vmseq_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmseq_vv_i32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vbool16_t test_vmseq_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmseq_vx_i32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vbool8_t test_vmseq_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmseq_vv_i32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vbool8_t test_vmseq_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmseq_vx_i32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vbool4_t 
test_vmseq_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmseq_vv_i32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vbool4_t test_vmseq_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmseq_vx_i32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vbool64_t test_vmseq_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmseq_vv_i64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vbool64_t test_vmseq_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmseq_vx_i64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vbool32_t test_vmseq_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmseq_vv_i64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vbool32_t test_vmseq_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmseq_vx_i64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vbool16_t test_vmseq_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmseq_vv_i64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vbool16_t test_vmseq_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmseq_vx_i64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vbool8_t test_vmseq_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmseq_vv_i64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vbool8_t test_vmseq_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmseq_vx_i64m8_b8_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vbool64_t test_vmseq_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vmseq_vv_u8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vbool64_t test_vmseq_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmseq_vx_u8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vbool32_t test_vmseq_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vmseq_vv_u8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t 
test_vmseq_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vbool32_t test_vmseq_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmseq_vx_u8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vbool16_t test_vmseq_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vmseq_vv_u8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vbool16_t test_vmseq_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmseq_vx_u8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vbool8_t test_vmseq_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vmseq_vv_u8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vbool8_t test_vmseq_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmseq_vx_u8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vbool4_t test_vmseq_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmseq_vv_u8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vbool4_t test_vmseq_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmseq_vx_u8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vbool2_t test_vmseq_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vmseq_vv_u8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vbool2_t test_vmseq_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmseq_vx_u8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmseq_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vbool1_t test_vmseq_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vmseq_vv_u8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmseq_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vbool1_t test_vmseq_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmseq_vx_u8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vbool64_t test_vmseq_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmseq_vv_u16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vbool64_t test_vmseq_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return 
__riscv_vmseq_vx_u16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vbool32_t test_vmseq_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmseq_vv_u16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vbool32_t test_vmseq_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmseq_vx_u16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vbool16_t test_vmseq_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vmseq_vv_u16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vbool16_t test_vmseq_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmseq_vx_u16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vbool8_t test_vmseq_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vmseq_vv_u16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vbool8_t test_vmseq_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmseq_vx_u16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vbool4_t test_vmseq_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vmseq_vv_u16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vbool4_t test_vmseq_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmseq_vx_u16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmseq_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vbool2_t test_vmseq_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vmseq_vv_u16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmseq_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vbool2_t test_vmseq_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmseq_vx_u16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vbool64_t test_vmseq_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmseq_vv_u32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vbool64_t test_vmseq_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmseq_vx_u32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, 
vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vbool32_t test_vmseq_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vmseq_vv_u32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vbool32_t test_vmseq_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmseq_vx_u32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vbool16_t test_vmseq_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vmseq_vv_u32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vbool16_t test_vmseq_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmseq_vx_u32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vbool8_t test_vmseq_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vmseq_vv_u32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vbool8_t test_vmseq_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmseq_vx_u32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmseq_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vbool4_t test_vmseq_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vmseq_vv_u32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmseq_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vbool4_t test_vmseq_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmseq_vx_u32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmseq_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vbool64_t test_vmseq_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vmseq_vv_u64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmseq_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vbool64_t test_vmseq_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmseq_vx_u64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmseq_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vbool32_t test_vmseq_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vmseq_vv_u64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmseq_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vbool32_t test_vmseq_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmseq_vx_u64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmseq_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vbool16_t test_vmseq_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vuint64m4_t vs2, 
vuint64m4_t vs1, + size_t vl) { return __riscv_vmseq_vv_u64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmseq_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vbool16_t test_vmseq_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmseq_vx_u64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmseq_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vbool8_t test_vmseq_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vmseq_vv_u64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmseq_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vbool8_t test_vmseq_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmseq_vx_u64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmsge.c b/auto-generated/policy_funcs/llvm-api-tests/vmsge.c index 38c42dd59..7a93cf9f9 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmsge.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmsge.c @@ -1,183 +1,230 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmsge_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vbool64_t test_vmsge_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmsge_vv_i8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vbool64_t test_vmsge_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsge_vx_i8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vbool32_t test_vmsge_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmsge_vv_i8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vbool32_t test_vmsge_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsge_vx_i8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vbool16_t test_vmsge_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmsge_vv_i8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vbool16_t test_vmsge_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsge_vx_i8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vbool8_t test_vmsge_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmsge_vv_i8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t
test_vmsge_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vbool8_t test_vmsge_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsge_vx_i8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vbool4_t test_vmsge_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmsge_vv_i8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vbool4_t test_vmsge_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsge_vx_i8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vbool2_t test_vmsge_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmsge_vv_i8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vbool2_t test_vmsge_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsge_vx_i8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsge_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vbool1_t test_vmsge_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmsge_vv_i8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsge_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vbool1_t test_vmsge_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsge_vx_i8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vbool64_t test_vmsge_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmsge_vv_i16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vbool64_t test_vmsge_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmsge_vx_i16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vbool32_t test_vmsge_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmsge_vv_i16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vbool32_t test_vmsge_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmsge_vx_i16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vbool16_t test_vmsge_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmsge_vv_i16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vbool16_t test_vmsge_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return 
__riscv_vmsge_vx_i16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vbool8_t test_vmsge_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmsge_vv_i16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vbool8_t test_vmsge_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsge_vx_i16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vbool4_t test_vmsge_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmsge_vv_i16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vbool4_t test_vmsge_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsge_vx_i16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsge_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vbool2_t test_vmsge_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmsge_vv_i16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsge_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vbool2_t test_vmsge_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsge_vx_i16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vbool64_t test_vmsge_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmsge_vv_i32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vbool64_t test_vmsge_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmsge_vx_i32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vbool32_t test_vmsge_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmsge_vv_i32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vbool32_t test_vmsge_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsge_vx_i32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vbool16_t test_vmsge_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmsge_vv_i32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vbool16_t test_vmsge_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsge_vx_i32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vbool8_t 
test_vmsge_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmsge_vv_i32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vbool8_t test_vmsge_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsge_vx_i32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsge_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vbool4_t test_vmsge_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmsge_vv_i32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsge_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vbool4_t test_vmsge_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsge_vx_i32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsge_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vbool64_t test_vmsge_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmsge_vv_i64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsge_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vbool64_t test_vmsge_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsge_vx_i64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsge_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vbool32_t test_vmsge_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmsge_vv_i64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsge_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vbool32_t test_vmsge_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsge_vx_i64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsge_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vbool16_t test_vmsge_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmsge_vv_i64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsge_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vbool16_t test_vmsge_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsge_vx_i64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsge_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vbool8_t test_vmsge_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmsge_vv_i64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsge_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vbool8_t test_vmsge_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsge_vx_i64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmsgeu.c b/auto-generated/policy_funcs/llvm-api-tests/vmsgeu.c index c5ebbb932..191666ccf 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmsgeu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmsgeu.c @@ -1,183 +1,248 @@ // REQUIRES: riscv-registered-target // 
RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmsgeu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vbool64_t test_vmsgeu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vmsgeu_vv_u8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vbool64_t test_vmsgeu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsgeu_vx_u8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vbool32_t test_vmsgeu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vmsgeu_vv_u8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vbool32_t test_vmsgeu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsgeu_vx_u8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vbool16_t test_vmsgeu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vmsgeu_vv_u8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vbool16_t test_vmsgeu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsgeu_vx_u8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vbool8_t test_vmsgeu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vmsgeu_vv_u8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vbool8_t test_vmsgeu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsgeu_vx_u8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vbool4_t test_vmsgeu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmsgeu_vv_u8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vbool4_t test_vmsgeu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsgeu_vx_u8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgeu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vbool2_t test_vmsgeu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vmsgeu_vv_u8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgeu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vbool2_t
test_vmsgeu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsgeu_vx_u8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsgeu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vbool1_t test_vmsgeu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vmsgeu_vv_u8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsgeu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vbool1_t test_vmsgeu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsgeu_vx_u8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vbool64_t test_vmsgeu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmsgeu_vv_u16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vbool64_t test_vmsgeu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsgeu_vx_u16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vbool32_t test_vmsgeu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmsgeu_vv_u16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vbool32_t test_vmsgeu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsgeu_vx_u16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vbool16_t test_vmsgeu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vmsgeu_vv_u16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vbool16_t test_vmsgeu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsgeu_vx_u16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vbool8_t test_vmsgeu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vmsgeu_vv_u16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vbool8_t test_vmsgeu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsgeu_vx_u16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vbool4_t test_vmsgeu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vmsgeu_vv_u16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vbool4_t test_vmsgeu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return 
__riscv_vmsgeu_vx_u16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgeu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vbool2_t test_vmsgeu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vmsgeu_vv_u16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgeu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vbool2_t test_vmsgeu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsgeu_vx_u16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vbool64_t test_vmsgeu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmsgeu_vv_u32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vbool64_t test_vmsgeu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmsgeu_vx_u32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vbool32_t test_vmsgeu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vmsgeu_vv_u32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vbool32_t test_vmsgeu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmsgeu_vx_u32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vbool16_t test_vmsgeu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vmsgeu_vv_u32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vbool16_t test_vmsgeu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmsgeu_vx_u32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vbool8_t test_vmsgeu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vmsgeu_vv_u32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vbool8_t test_vmsgeu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmsgeu_vx_u32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgeu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vbool4_t test_vmsgeu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vmsgeu_vv_u32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgeu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vbool4_t test_vmsgeu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmsgeu_vx_u32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgeu_vv_u64m1_b64_mu(vbool64_t 
vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vbool64_t test_vmsgeu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vmsgeu_vv_u64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgeu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vbool64_t test_vmsgeu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmsgeu_vx_u64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgeu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vbool32_t test_vmsgeu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vmsgeu_vv_u64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgeu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vbool32_t test_vmsgeu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmsgeu_vx_u64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgeu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vbool16_t test_vmsgeu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vmsgeu_vv_u64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgeu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vbool16_t test_vmsgeu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmsgeu_vx_u64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgeu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vbool8_t test_vmsgeu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vmsgeu_vv_u64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgeu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vbool8_t test_vmsgeu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmsgeu_vx_u64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmsgt.c b/auto-generated/policy_funcs/llvm-api-tests/vmsgt.c index 62b84314f..dffc04b9c 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmsgt.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmsgt.c @@ -1,183 +1,230 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmsgt_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vbool64_t test_vmsgt_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmsgt_vv_i8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vbool64_t test_vmsgt_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsgt_vx_i8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t
vs1, size_t vl) { +vbool32_t test_vmsgt_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmsgt_vv_i8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vbool32_t test_vmsgt_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsgt_vx_i8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vbool16_t test_vmsgt_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmsgt_vv_i8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vbool16_t test_vmsgt_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsgt_vx_i8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vbool8_t test_vmsgt_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmsgt_vv_i8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vbool8_t test_vmsgt_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsgt_vx_i8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vbool4_t test_vmsgt_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmsgt_vv_i8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vbool4_t test_vmsgt_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsgt_vx_i8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgt_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vbool2_t test_vmsgt_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmsgt_vv_i8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgt_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vbool2_t test_vmsgt_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsgt_vx_i8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsgt_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vbool1_t test_vmsgt_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmsgt_vv_i8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsgt_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vbool1_t test_vmsgt_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsgt_vx_i8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vbool64_t test_vmsgt_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmsgt_vv_i16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, 
int16_t rs1, size_t vl) { +vbool64_t test_vmsgt_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmsgt_vx_i16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vbool32_t test_vmsgt_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmsgt_vv_i16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vbool32_t test_vmsgt_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmsgt_vx_i16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vbool16_t test_vmsgt_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmsgt_vv_i16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vbool16_t test_vmsgt_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsgt_vx_i16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vbool8_t test_vmsgt_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmsgt_vv_i16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vbool8_t test_vmsgt_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsgt_vx_i16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vbool4_t test_vmsgt_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmsgt_vv_i16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vbool4_t test_vmsgt_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsgt_vx_i16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgt_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vbool2_t test_vmsgt_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmsgt_vv_i16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgt_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vbool2_t test_vmsgt_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsgt_vx_i16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vbool64_t test_vmsgt_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmsgt_vv_i32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vbool64_t test_vmsgt_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmsgt_vx_i32mf2_b64_mu(vm, vd, 
vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vbool32_t test_vmsgt_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmsgt_vv_i32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vbool32_t test_vmsgt_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsgt_vx_i32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vbool16_t test_vmsgt_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmsgt_vv_i32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vbool16_t test_vmsgt_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsgt_vx_i32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vbool8_t test_vmsgt_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmsgt_vv_i32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vbool8_t test_vmsgt_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsgt_vx_i32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgt_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vbool4_t test_vmsgt_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmsgt_vv_i32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgt_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vbool4_t test_vmsgt_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsgt_vx_i32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgt_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vbool64_t test_vmsgt_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmsgt_vv_i64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgt_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vbool64_t test_vmsgt_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsgt_vx_i64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgt_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vbool32_t test_vmsgt_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmsgt_vv_i64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgt_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vbool32_t test_vmsgt_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsgt_vx_i64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgt_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vbool16_t test_vmsgt_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, 
vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmsgt_vv_i64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgt_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vbool16_t test_vmsgt_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsgt_vx_i64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgt_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vbool8_t test_vmsgt_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmsgt_vv_i64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgt_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vbool8_t test_vmsgt_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsgt_vx_i64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmsgtu.c b/auto-generated/policy_funcs/llvm-api-tests/vmsgtu.c index af5c1969b..96b221070 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmsgtu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmsgtu.c @@ -1,183 +1,248 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmsgtu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vbool64_t test_vmsgtu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vmsgtu_vv_u8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vbool64_t test_vmsgtu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vbool32_t test_vmsgtu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vmsgtu_vv_u8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vbool32_t test_vmsgtu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vbool16_t test_vmsgtu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vmsgtu_vv_u8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vbool16_t test_vmsgtu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vbool8_t test_vmsgtu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return
__riscv_vmsgtu_vv_u8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vbool8_t test_vmsgtu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgtu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vbool4_t test_vmsgtu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmsgtu_vv_u8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgtu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vbool4_t test_vmsgtu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgtu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vbool2_t test_vmsgtu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vmsgtu_vv_u8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgtu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vbool2_t test_vmsgtu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsgtu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vbool1_t test_vmsgtu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vmsgtu_vv_u8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsgtu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vbool1_t test_vmsgtu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vbool64_t test_vmsgtu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmsgtu_vv_u16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vbool64_t test_vmsgtu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsgtu_vx_u16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vbool32_t test_vmsgtu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmsgtu_vv_u16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vbool32_t test_vmsgtu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsgtu_vx_u16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vbool16_t test_vmsgtu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vmsgtu_vv_u16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, 
size_t vl) { +vbool16_t test_vmsgtu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsgtu_vx_u16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vbool8_t test_vmsgtu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vmsgtu_vv_u16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vbool8_t test_vmsgtu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgtu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vbool4_t test_vmsgtu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vmsgtu_vv_u16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgtu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vbool4_t test_vmsgtu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsgtu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vbool2_t test_vmsgtu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vmsgtu_vv_u16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsgtu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vbool2_t test_vmsgtu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vbool64_t test_vmsgtu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmsgtu_vv_u32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vbool64_t test_vmsgtu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmsgtu_vx_u32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vbool32_t test_vmsgtu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vmsgtu_vv_u32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vbool32_t test_vmsgtu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmsgtu_vx_u32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vbool16_t test_vmsgtu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vmsgtu_vv_u32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vbool16_t test_vmsgtu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint32m2_t vs2, 
uint32_t rs1, + size_t vl) { return __riscv_vmsgtu_vx_u32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vbool8_t test_vmsgtu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vmsgtu_vv_u32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vbool8_t test_vmsgtu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsgtu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vbool4_t test_vmsgtu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vmsgtu_vv_u32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsgtu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vbool4_t test_vmsgtu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsgtu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vbool64_t test_vmsgtu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vmsgtu_vv_u64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsgtu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vbool64_t test_vmsgtu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmsgtu_vx_u64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsgtu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vbool32_t test_vmsgtu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vmsgtu_vv_u64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsgtu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vbool32_t test_vmsgtu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmsgtu_vx_u64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsgtu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vbool16_t test_vmsgtu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vmsgtu_vv_u64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsgtu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vbool16_t test_vmsgtu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmsgtu_vx_u64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsgtu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vbool8_t test_vmsgtu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vmsgtu_vv_u64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsgtu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vbool8_t test_vmsgtu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmsgtu_vx_u64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git 
a/auto-generated/policy_funcs/llvm-api-tests/vmsif.c b/auto-generated/policy_funcs/llvm-api-tests/vmsif.c index 429e35992..d91e7391f 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmsif.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmsif.c @@ -21,14 +21,17 @@ vbool8_t test_vmsif_m_b8_mu(vbool8_t vm, vbool8_t vd, vbool8_t vs2, size_t vl) { return __riscv_vmsif_m_b8_mu(vm, vd, vs2, vl); } -vbool16_t test_vmsif_m_b16_mu(vbool16_t vm, vbool16_t vd, vbool16_t vs2, size_t vl) { +vbool16_t test_vmsif_m_b16_mu(vbool16_t vm, vbool16_t vd, vbool16_t vs2, + size_t vl) { return __riscv_vmsif_m_b16_mu(vm, vd, vs2, vl); } -vbool32_t test_vmsif_m_b32_mu(vbool32_t vm, vbool32_t vd, vbool32_t vs2, size_t vl) { +vbool32_t test_vmsif_m_b32_mu(vbool32_t vm, vbool32_t vd, vbool32_t vs2, + size_t vl) { return __riscv_vmsif_m_b32_mu(vm, vd, vs2, vl); } -vbool64_t test_vmsif_m_b64_mu(vbool64_t vm, vbool64_t vd, vbool64_t vs2, size_t vl) { +vbool64_t test_vmsif_m_b64_mu(vbool64_t vm, vbool64_t vd, vbool64_t vs2, + size_t vl) { return __riscv_vmsif_m_b64_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmsle.c b/auto-generated/policy_funcs/llvm-api-tests/vmsle.c index 33832a5b2..4998b6b58 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmsle.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmsle.c @@ -1,183 +1,230 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmsle_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vbool64_t test_vmsle_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmsle_vv_i8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vbool64_t test_vmsle_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsle_vx_i8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vbool32_t test_vmsle_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmsle_vv_i8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vbool32_t test_vmsle_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsle_vx_i8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vbool16_t test_vmsle_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmsle_vv_i8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vbool16_t test_vmsle_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsle_vx_i8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i8m1_b8_mu(vbool8_t
vm, vbool8_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmsle_vv_i8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vbool8_t test_vmsle_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsle_vx_i8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vbool4_t test_vmsle_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmsle_vv_i8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vbool4_t test_vmsle_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsle_vx_i8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsle_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vbool2_t test_vmsle_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmsle_vv_i8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsle_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vbool2_t test_vmsle_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsle_vx_i8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsle_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vbool1_t test_vmsle_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmsle_vv_i8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsle_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vbool1_t test_vmsle_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsle_vx_i8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vbool64_t test_vmsle_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmsle_vv_i16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vbool64_t test_vmsle_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmsle_vx_i16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vbool32_t test_vmsle_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmsle_vv_i16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vbool32_t test_vmsle_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmsle_vx_i16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vbool16_t test_vmsle_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmsle_vv_i16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vbool16_t 
test_vmsle_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsle_vx_i16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vbool8_t test_vmsle_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmsle_vv_i16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vbool8_t test_vmsle_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsle_vx_i16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vbool4_t test_vmsle_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmsle_vv_i16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vbool4_t test_vmsle_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsle_vx_i16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsle_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vbool2_t test_vmsle_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmsle_vv_i16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsle_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vbool2_t test_vmsle_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsle_vx_i16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vbool64_t test_vmsle_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmsle_vv_i32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vbool64_t test_vmsle_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmsle_vx_i32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vbool32_t test_vmsle_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmsle_vv_i32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vbool32_t test_vmsle_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsle_vx_i32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vbool16_t test_vmsle_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmsle_vv_i32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vbool16_t test_vmsle_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsle_vx_i32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t 
test_vmsle_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vbool8_t test_vmsle_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmsle_vv_i32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vbool8_t test_vmsle_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsle_vx_i32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsle_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vbool4_t test_vmsle_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmsle_vv_i32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsle_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vbool4_t test_vmsle_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsle_vx_i32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsle_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vbool64_t test_vmsle_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmsle_vv_i64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsle_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vbool64_t test_vmsle_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsle_vx_i64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsle_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vbool32_t test_vmsle_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmsle_vv_i64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsle_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vbool32_t test_vmsle_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsle_vx_i64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsle_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vbool16_t test_vmsle_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmsle_vv_i64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsle_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vbool16_t test_vmsle_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsle_vx_i64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsle_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vbool8_t test_vmsle_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmsle_vv_i64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsle_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vbool8_t test_vmsle_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsle_vx_i64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmsleu.c b/auto-generated/policy_funcs/llvm-api-tests/vmsleu.c index 689d3c8a9..e0911f3e1 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmsleu.c +++ 
b/auto-generated/policy_funcs/llvm-api-tests/vmsleu.c @@ -1,183 +1,248 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmsleu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vbool64_t test_vmsleu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vmsleu_vv_u8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vbool64_t test_vmsleu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsleu_vx_u8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vbool32_t test_vmsleu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vmsleu_vv_u8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vbool32_t test_vmsleu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsleu_vx_u8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vbool16_t test_vmsleu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vmsleu_vv_u8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vbool16_t test_vmsleu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsleu_vx_u8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vbool8_t test_vmsleu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vmsleu_vv_u8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vbool8_t test_vmsleu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsleu_vx_u8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vbool4_t test_vmsleu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmsleu_vv_u8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vbool4_t test_vmsleu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsleu_vx_u8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsleu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vbool2_t test_vmsleu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vmsleu_vv_u8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t 
test_vmsleu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vbool2_t test_vmsleu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsleu_vx_u8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsleu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vbool1_t test_vmsleu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vmsleu_vv_u8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsleu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vbool1_t test_vmsleu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsleu_vx_u8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vbool64_t test_vmsleu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmsleu_vv_u16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vbool64_t test_vmsleu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsleu_vx_u16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vbool32_t test_vmsleu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmsleu_vv_u16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vbool32_t test_vmsleu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsleu_vx_u16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vbool16_t test_vmsleu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vmsleu_vv_u16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vbool16_t test_vmsleu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsleu_vx_u16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vbool8_t test_vmsleu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vmsleu_vv_u16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vbool8_t test_vmsleu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsleu_vx_u16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vbool4_t test_vmsleu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vmsleu_vv_u16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vbool4_t 
test_vmsleu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsleu_vx_u16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsleu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vbool2_t test_vmsleu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vmsleu_vv_u16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsleu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vbool2_t test_vmsleu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsleu_vx_u16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vbool64_t test_vmsleu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmsleu_vv_u32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vbool64_t test_vmsleu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmsleu_vx_u32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vbool32_t test_vmsleu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vmsleu_vv_u32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vbool32_t test_vmsleu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmsleu_vx_u32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vbool16_t test_vmsleu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vmsleu_vv_u32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vbool16_t test_vmsleu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmsleu_vx_u32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vbool8_t test_vmsleu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vmsleu_vv_u32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vbool8_t test_vmsleu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmsleu_vx_u32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsleu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vbool4_t test_vmsleu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vmsleu_vv_u32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsleu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vbool4_t test_vmsleu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { 
return __riscv_vmsleu_vx_u32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsleu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vbool64_t test_vmsleu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vmsleu_vv_u64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsleu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vbool64_t test_vmsleu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmsleu_vx_u64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsleu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vbool32_t test_vmsleu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vmsleu_vv_u64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsleu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vbool32_t test_vmsleu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmsleu_vx_u64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsleu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vbool16_t test_vmsleu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vmsleu_vv_u64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsleu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vbool16_t test_vmsleu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmsleu_vx_u64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsleu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vbool8_t test_vmsleu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vmsleu_vv_u64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsleu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vbool8_t test_vmsleu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmsleu_vx_u64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmslt.c b/auto-generated/policy_funcs/llvm-api-tests/vmslt.c index ecf8118e3..3f05df7e4 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmslt.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmslt.c @@ -1,183 +1,230 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmslt_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vbool64_t test_vmslt_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmslt_vv_i8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmslt_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vbool64_t test_vmslt_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmslt_vx_i8mf8_b64_mu(vm, vd, 
vs2, rs1, vl); } -vbool32_t test_vmslt_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vbool32_t test_vmslt_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmslt_vv_i8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmslt_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vbool32_t test_vmslt_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmslt_vx_i8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmslt_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vbool16_t test_vmslt_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmslt_vv_i8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmslt_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vbool16_t test_vmslt_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmslt_vx_i8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmslt_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vbool8_t test_vmslt_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmslt_vv_i8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmslt_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vbool8_t test_vmslt_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmslt_vx_i8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmslt_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vbool4_t test_vmslt_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmslt_vv_i8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmslt_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vbool4_t test_vmslt_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmslt_vx_i8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmslt_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vbool2_t test_vmslt_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmslt_vv_i8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmslt_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vbool2_t test_vmslt_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmslt_vx_i8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmslt_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vbool1_t test_vmslt_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmslt_vv_i8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmslt_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vbool1_t test_vmslt_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmslt_vx_i8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmslt_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vbool64_t test_vmslt_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmslt_vv_i16mf4_b64_mu(vm, 
vd, vs2, vs1, vl); } -vbool64_t test_vmslt_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vbool64_t test_vmslt_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmslt_vx_i16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmslt_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vbool32_t test_vmslt_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmslt_vv_i16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmslt_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vbool32_t test_vmslt_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmslt_vx_i16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmslt_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vbool16_t test_vmslt_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmslt_vv_i16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmslt_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vbool16_t test_vmslt_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmslt_vx_i16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmslt_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vbool8_t test_vmslt_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmslt_vv_i16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmslt_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vbool8_t test_vmslt_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmslt_vx_i16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmslt_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vbool4_t test_vmslt_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmslt_vv_i16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmslt_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vbool4_t test_vmslt_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmslt_vx_i16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmslt_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vbool2_t test_vmslt_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmslt_vv_i16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmslt_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vbool2_t test_vmslt_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmslt_vx_i16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmslt_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vbool64_t test_vmslt_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmslt_vv_i32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmslt_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vbool64_t test_vmslt_vx_i32mf2_b64_mu(vbool64_t vm, 
vbool64_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmslt_vx_i32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmslt_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vbool32_t test_vmslt_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmslt_vv_i32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmslt_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vbool32_t test_vmslt_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmslt_vx_i32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmslt_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vbool16_t test_vmslt_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmslt_vv_i32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmslt_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vbool16_t test_vmslt_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmslt_vx_i32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmslt_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vbool8_t test_vmslt_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmslt_vv_i32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmslt_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vbool8_t test_vmslt_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmslt_vx_i32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmslt_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vbool4_t test_vmslt_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmslt_vv_i32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmslt_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vbool4_t test_vmslt_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmslt_vx_i32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmslt_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vbool64_t test_vmslt_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmslt_vv_i64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmslt_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vbool64_t test_vmslt_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmslt_vx_i64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmslt_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vbool32_t test_vmslt_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmslt_vv_i64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmslt_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vbool32_t test_vmslt_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmslt_vx_i64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmslt_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, 
vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vbool16_t test_vmslt_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmslt_vv_i64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmslt_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vbool16_t test_vmslt_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmslt_vx_i64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmslt_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vbool8_t test_vmslt_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmslt_vv_i64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmslt_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vbool8_t test_vmslt_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmslt_vx_i64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmsltu.c b/auto-generated/policy_funcs/llvm-api-tests/vmsltu.c index 583f88f0d..2191e7cc9 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmsltu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmsltu.c @@ -1,183 +1,248 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmsltu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vbool64_t test_vmsltu_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vmsltu_vv_u8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsltu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vbool64_t test_vmsltu_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsltu_vx_u8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsltu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vbool32_t test_vmsltu_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vmsltu_vv_u8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsltu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vbool32_t test_vmsltu_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsltu_vx_u8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsltu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vbool16_t test_vmsltu_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vmsltu_vv_u8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsltu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vbool16_t test_vmsltu_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsltu_vx_u8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsltu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vbool8_t 
test_vmsltu_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vmsltu_vv_u8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsltu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vbool8_t test_vmsltu_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsltu_vx_u8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsltu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vbool4_t test_vmsltu_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmsltu_vv_u8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsltu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vbool4_t test_vmsltu_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsltu_vx_u8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsltu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vbool2_t test_vmsltu_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vmsltu_vv_u8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsltu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vbool2_t test_vmsltu_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsltu_vx_u8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsltu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vbool1_t test_vmsltu_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vmsltu_vv_u8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsltu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vbool1_t test_vmsltu_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsltu_vx_u8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsltu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vbool64_t test_vmsltu_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmsltu_vv_u16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsltu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vbool64_t test_vmsltu_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsltu_vx_u16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsltu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vbool32_t test_vmsltu_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmsltu_vv_u16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsltu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vbool32_t test_vmsltu_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsltu_vx_u16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsltu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vbool16_t test_vmsltu_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vmsltu_vv_u16m1_b16_mu(vm, vd, vs2, vs1, 
vl); } -vbool16_t test_vmsltu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vbool16_t test_vmsltu_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsltu_vx_u16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsltu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vbool8_t test_vmsltu_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vmsltu_vv_u16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsltu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vbool8_t test_vmsltu_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsltu_vx_u16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsltu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vbool4_t test_vmsltu_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vmsltu_vv_u16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsltu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vbool4_t test_vmsltu_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsltu_vx_u16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsltu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vbool2_t test_vmsltu_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vmsltu_vv_u16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsltu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vbool2_t test_vmsltu_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsltu_vx_u16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsltu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vbool64_t test_vmsltu_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmsltu_vv_u32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsltu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vbool64_t test_vmsltu_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmsltu_vx_u32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsltu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vbool32_t test_vmsltu_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vmsltu_vv_u32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsltu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vbool32_t test_vmsltu_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmsltu_vx_u32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsltu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vbool16_t test_vmsltu_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vmsltu_vv_u32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsltu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, 
size_t vl) { +vbool16_t test_vmsltu_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmsltu_vx_u32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsltu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vbool8_t test_vmsltu_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vmsltu_vv_u32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsltu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vbool8_t test_vmsltu_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmsltu_vx_u32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsltu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vbool4_t test_vmsltu_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vmsltu_vv_u32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsltu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vbool4_t test_vmsltu_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmsltu_vx_u32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsltu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vbool64_t test_vmsltu_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vmsltu_vv_u64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsltu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vbool64_t test_vmsltu_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmsltu_vx_u64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsltu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vbool32_t test_vmsltu_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vmsltu_vv_u64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsltu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vbool32_t test_vmsltu_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmsltu_vx_u64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsltu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vbool16_t test_vmsltu_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vmsltu_vv_u64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsltu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vbool16_t test_vmsltu_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmsltu_vx_u64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsltu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vbool8_t test_vmsltu_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vmsltu_vv_u64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsltu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vbool8_t test_vmsltu_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, + uint64_t rs1, 
size_t vl) { return __riscv_vmsltu_vx_u64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmsne.c b/auto-generated/policy_funcs/llvm-api-tests/vmsne.c index 7fae96175..d3d5b04d6 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmsne.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmsne.c @@ -1,359 +1,465 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vbool64_t test_vmsne_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vbool64_t test_vmsne_vv_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmsne_vv_i8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vbool64_t test_vmsne_vx_i8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsne_vx_i8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vbool32_t test_vmsne_vv_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmsne_vv_i8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vbool32_t test_vmsne_vx_i8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsne_vx_i8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vbool16_t test_vmsne_vv_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmsne_vv_i8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vbool16_t test_vmsne_vx_i8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsne_vx_i8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vbool8_t test_vmsne_vv_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmsne_vv_i8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vbool8_t test_vmsne_vx_i8m1_b8_mu(vbool8_t vm, vbool8_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsne_vx_i8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vbool4_t test_vmsne_vv_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmsne_vv_i8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vbool4_t test_vmsne_vx_i8m2_b4_mu(vbool4_t vm, vbool4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsne_vx_i8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, vint8m4_t 
vs1, size_t vl) { +vbool2_t test_vmsne_vv_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmsne_vv_i8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vbool2_t test_vmsne_vx_i8m4_b2_mu(vbool2_t vm, vbool2_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsne_vx_i8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsne_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vbool1_t test_vmsne_vv_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmsne_vv_i8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsne_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vbool1_t test_vmsne_vx_i8m8_b1_mu(vbool1_t vm, vbool1_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmsne_vx_i8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vbool64_t test_vmsne_vv_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmsne_vv_i16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vbool64_t test_vmsne_vx_i16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmsne_vx_i16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vbool32_t test_vmsne_vv_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmsne_vv_i16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vbool32_t test_vmsne_vx_i16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmsne_vx_i16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vbool16_t test_vmsne_vv_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmsne_vv_i16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vbool16_t test_vmsne_vx_i16m1_b16_mu(vbool16_t vm, vbool16_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsne_vx_i16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vbool8_t test_vmsne_vv_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmsne_vv_i16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vbool8_t test_vmsne_vx_i16m2_b8_mu(vbool8_t vm, vbool8_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsne_vx_i16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vbool4_t test_vmsne_vv_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmsne_vv_i16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t 
test_vmsne_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vbool4_t test_vmsne_vx_i16m4_b4_mu(vbool4_t vm, vbool4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsne_vx_i16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vbool2_t test_vmsne_vv_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmsne_vv_i16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vbool2_t test_vmsne_vx_i16m8_b2_mu(vbool2_t vm, vbool2_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmsne_vx_i16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vbool64_t test_vmsne_vv_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmsne_vv_i32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vbool64_t test_vmsne_vx_i32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmsne_vx_i32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vbool32_t test_vmsne_vv_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmsne_vv_i32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vbool32_t test_vmsne_vx_i32m1_b32_mu(vbool32_t vm, vbool32_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsne_vx_i32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vbool16_t test_vmsne_vv_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmsne_vv_i32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vbool16_t test_vmsne_vx_i32m2_b16_mu(vbool16_t vm, vbool16_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsne_vx_i32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vbool8_t test_vmsne_vv_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmsne_vv_i32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vbool8_t test_vmsne_vx_i32m4_b8_mu(vbool8_t vm, vbool8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmsne_vx_i32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vbool4_t test_vmsne_vv_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmsne_vv_i32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vbool4_t test_vmsne_vx_i32m8_b4_mu(vbool4_t vm, vbool4_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) 
{ return __riscv_vmsne_vx_i32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vbool64_t test_vmsne_vv_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmsne_vv_i64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vbool64_t test_vmsne_vx_i64m1_b64_mu(vbool64_t vm, vbool64_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsne_vx_i64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vbool32_t test_vmsne_vv_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmsne_vv_i64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vbool32_t test_vmsne_vx_i64m2_b32_mu(vbool32_t vm, vbool32_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsne_vx_i64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vbool16_t test_vmsne_vv_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmsne_vv_i64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vbool16_t test_vmsne_vx_i64m4_b16_mu(vbool16_t vm, vbool16_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsne_vx_i64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vbool8_t test_vmsne_vv_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmsne_vv_i64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vbool8_t test_vmsne_vx_i64m8_b8_mu(vbool8_t vm, vbool8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmsne_vx_i64m8_b8_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vbool64_t test_vmsne_vv_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vmsne_vv_u8mf8_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vbool64_t test_vmsne_vx_u8mf8_b64_mu(vbool64_t vm, vbool64_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsne_vx_u8mf8_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vbool32_t test_vmsne_vv_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vmsne_vv_u8mf4_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vbool32_t test_vmsne_vx_u8mf4_b32_mu(vbool32_t vm, vbool32_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsne_vx_u8mf4_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, vuint8mf2_t 
vs1, size_t vl) { +vbool16_t test_vmsne_vv_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vmsne_vv_u8mf2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vbool16_t test_vmsne_vx_u8mf2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmsne_vx_u8mf2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vbool8_t test_vmsne_vv_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vmsne_vv_u8m1_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vbool8_t test_vmsne_vx_u8m1_b8_mu(vbool8_t vm, vbool8_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsne_vx_u8m1_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vbool4_t test_vmsne_vv_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmsne_vv_u8m2_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vbool4_t test_vmsne_vx_u8m2_b4_mu(vbool4_t vm, vbool4_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsne_vx_u8m2_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vbool2_t test_vmsne_vv_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vmsne_vv_u8m4_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vbool2_t test_vmsne_vx_u8m4_b2_mu(vbool2_t vm, vbool2_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsne_vx_u8m4_b2_mu(vm, vd, vs2, rs1, vl); } -vbool1_t test_vmsne_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vbool1_t test_vmsne_vv_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vmsne_vv_u8m8_b1_mu(vm, vd, vs2, vs1, vl); } -vbool1_t test_vmsne_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vbool1_t test_vmsne_vx_u8m8_b1_mu(vbool1_t vm, vbool1_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmsne_vx_u8m8_b1_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vbool64_t test_vmsne_vv_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmsne_vv_u16mf4_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vbool64_t test_vmsne_vx_u16mf4_b64_mu(vbool64_t vm, vbool64_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsne_vx_u16mf4_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vbool32_t test_vmsne_vv_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmsne_vv_u16mf2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t 
test_vmsne_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vbool32_t test_vmsne_vx_u16mf2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmsne_vx_u16mf2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vbool16_t test_vmsne_vv_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vmsne_vv_u16m1_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vbool16_t test_vmsne_vx_u16m1_b16_mu(vbool16_t vm, vbool16_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmsne_vx_u16m1_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vbool8_t test_vmsne_vv_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vmsne_vv_u16m2_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vbool8_t test_vmsne_vx_u16m2_b8_mu(vbool8_t vm, vbool8_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsne_vx_u16m2_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vbool4_t test_vmsne_vv_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vmsne_vv_u16m4_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vbool4_t test_vmsne_vx_u16m4_b4_mu(vbool4_t vm, vbool4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsne_vx_u16m4_b4_mu(vm, vd, vs2, rs1, vl); } -vbool2_t test_vmsne_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vbool2_t test_vmsne_vv_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vmsne_vv_u16m8_b2_mu(vm, vd, vs2, vs1, vl); } -vbool2_t test_vmsne_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vbool2_t test_vmsne_vx_u16m8_b2_mu(vbool2_t vm, vbool2_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmsne_vx_u16m8_b2_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vbool64_t test_vmsne_vv_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmsne_vv_u32mf2_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vbool64_t test_vmsne_vx_u32mf2_b64_mu(vbool64_t vm, vbool64_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmsne_vx_u32mf2_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vbool32_t test_vmsne_vv_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vmsne_vv_u32m1_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vbool32_t 
test_vmsne_vx_u32m1_b32_mu(vbool32_t vm, vbool32_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmsne_vx_u32m1_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vbool16_t test_vmsne_vv_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vmsne_vv_u32m2_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vbool16_t test_vmsne_vx_u32m2_b16_mu(vbool16_t vm, vbool16_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmsne_vx_u32m2_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vbool8_t test_vmsne_vv_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vmsne_vv_u32m4_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vbool8_t test_vmsne_vx_u32m4_b8_mu(vbool8_t vm, vbool8_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmsne_vx_u32m4_b8_mu(vm, vd, vs2, rs1, vl); } -vbool4_t test_vmsne_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vbool4_t test_vmsne_vv_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vmsne_vv_u32m8_b4_mu(vm, vd, vs2, vs1, vl); } -vbool4_t test_vmsne_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vbool4_t test_vmsne_vx_u32m8_b4_mu(vbool4_t vm, vbool4_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmsne_vx_u32m8_b4_mu(vm, vd, vs2, rs1, vl); } -vbool64_t test_vmsne_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vbool64_t test_vmsne_vv_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vmsne_vv_u64m1_b64_mu(vm, vd, vs2, vs1, vl); } -vbool64_t test_vmsne_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vbool64_t test_vmsne_vx_u64m1_b64_mu(vbool64_t vm, vbool64_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmsne_vx_u64m1_b64_mu(vm, vd, vs2, rs1, vl); } -vbool32_t test_vmsne_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vbool32_t test_vmsne_vv_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vmsne_vv_u64m2_b32_mu(vm, vd, vs2, vs1, vl); } -vbool32_t test_vmsne_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vbool32_t test_vmsne_vx_u64m2_b32_mu(vbool32_t vm, vbool32_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmsne_vx_u64m2_b32_mu(vm, vd, vs2, rs1, vl); } -vbool16_t test_vmsne_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vbool16_t test_vmsne_vv_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vmsne_vv_u64m4_b16_mu(vm, vd, vs2, vs1, vl); } -vbool16_t test_vmsne_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vbool16_t test_vmsne_vx_u64m4_b16_mu(vbool16_t vm, vbool16_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return 
__riscv_vmsne_vx_u64m4_b16_mu(vm, vd, vs2, rs1, vl); } -vbool8_t test_vmsne_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vbool8_t test_vmsne_vv_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vmsne_vv_u64m8_b8_mu(vm, vd, vs2, vs1, vl); } -vbool8_t test_vmsne_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vbool8_t test_vmsne_vx_u64m8_b8_mu(vbool8_t vm, vbool8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmsne_vx_u64m8_b8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmsof.c b/auto-generated/policy_funcs/llvm-api-tests/vmsof.c index f16b44c18..8dd9f96ab 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmsof.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmsof.c @@ -21,14 +21,17 @@ vbool8_t test_vmsof_m_b8_mu(vbool8_t vm, vbool8_t vd, vbool8_t vs2, size_t vl) { return __riscv_vmsof_m_b8_mu(vm, vd, vs2, vl); } -vbool16_t test_vmsof_m_b16_mu(vbool16_t vm, vbool16_t vd, vbool16_t vs2, size_t vl) { +vbool16_t test_vmsof_m_b16_mu(vbool16_t vm, vbool16_t vd, vbool16_t vs2, + size_t vl) { return __riscv_vmsof_m_b16_mu(vm, vd, vs2, vl); } -vbool32_t test_vmsof_m_b32_mu(vbool32_t vm, vbool32_t vd, vbool32_t vs2, size_t vl) { +vbool32_t test_vmsof_m_b32_mu(vbool32_t vm, vbool32_t vd, vbool32_t vs2, + size_t vl) { return __riscv_vmsof_m_b32_mu(vm, vd, vs2, vl); } -vbool64_t test_vmsof_m_b64_mu(vbool64_t vm, vbool64_t vd, vbool64_t vs2, size_t vl) { +vbool64_t test_vmsof_m_b64_mu(vbool64_t vm, vbool64_t vd, vbool64_t vs2, + size_t vl) { return __riscv_vmsof_m_b64_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmul.c b/auto-generated/policy_funcs/llvm-api-tests/vmul.c index edb9cd306..1e155c454 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmul.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmul.c @@ -5,1410 +5,1810 @@ #include <riscv_vector.h> -vint8mf8_t test_vmul_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmul_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vmul_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmul_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmul_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmul_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vmul_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmul_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmul_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmul_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmul_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmul_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t
test_vmul_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vmul_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmul_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmul_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmul_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vmul_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmul_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmul_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmul_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vmul_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmul_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmul_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmul_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vmul_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmul_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmul_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmul_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vmul_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmul_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmul_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmul_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vmul_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmul_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmul_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmul_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vmul_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmul_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmul_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmul_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vmul_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { 
+vint16m2_t test_vmul_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmul_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmul_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vmul_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmul_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmul_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmul_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vmul_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmul_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmul_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmul_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vmul_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmul_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmul_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmul_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vmul_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmul_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmul_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmul_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vmul_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmul_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmul_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmul_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vmul_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmul_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmul_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmul_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vmul_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmul_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmul_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t 
test_vmul_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmul_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vmul_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmul_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmul_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmul_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vmul_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmul_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmul_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmul_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vmul_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmul_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmul_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmul_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vmul_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmul_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmul_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vmul_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vmul_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vmul_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmul_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vmul_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vmul_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vmul_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmul_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vmul_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vmul_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vmul_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmul_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vmul_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, 
vuint8m1_t vs1, + size_t vl) { return __riscv_vmul_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vmul_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmul_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vmul_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vmul_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vmul_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmul_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vmul_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vmul_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vmul_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmul_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vmul_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vmul_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vmul_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmul_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vmul_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vmul_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vmul_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vmul_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vmul_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vmul_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vmul_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vmul_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vmul_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmul_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vmul_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vmul_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t 
test_vmul_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vmul_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmul_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vmul_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vmul_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vmul_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmul_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vmul_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vmul_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vmul_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmul_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vmul_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vmul_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vmul_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vmul_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmul_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vmul_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vmul_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmul_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vmul_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vmul_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmul_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vmul_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, 
uint32_t rs1, size_t vl) { +vuint32m8_t test_vmul_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmul_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vmul_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmul_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vmul_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmul_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vmul_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmul_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vmul_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmul_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmul_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmul_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmul_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmul_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmul_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmul_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmul_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmul_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmul_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t 
test_vmul_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmul_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmul_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmul_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmul_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmul_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmul_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmul_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmul_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmul_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmul_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmul_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmul_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmul_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmul_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmul_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmul_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmul_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmul_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1_tum(vbool16_t vm, 
vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmul_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmul_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmul_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmul_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmul_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmul_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmul_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmul_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmul_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmul_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmul_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmul_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmul_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmul_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmul_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmul_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmul_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmul_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmul_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t 
test_vmul_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmul_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmul_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmul_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmul_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmul_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmul_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmul_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmul_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmul_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmul_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmul_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmul_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmul_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmul_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmul_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmul_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmul_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmul_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmul_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmul_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmul_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmul_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmul_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmul_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } 
-vuint8mf8_t test_vmul_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vmul_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vmul_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vmul_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vmul_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vmul_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vmul_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vmul_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vmul_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vmul_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vmul_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vmul_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vmul_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmul_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vmul_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vmul_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vmul_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vmul_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vmul_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return 
__riscv_vmul_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vmul_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vmul_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmul_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vmul_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmul_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vmul_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vmul_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmul_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vmul_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vmul_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vmul_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vmul_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vmul_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vmul_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vmul_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vmul_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vmul_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vmul_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vmul_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, 
uint16_t rs1, size_t vl) { +vuint16m8_t test_vmul_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vmul_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vmul_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmul_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vmul_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vmul_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vmul_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vmul_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vmul_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vmul_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vmul_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vmul_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vmul_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vmul_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vmul_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vmul_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vmul_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) 
{ return __riscv_vmul_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vmul_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmul_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vmul_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmul_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vmul_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmul_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmul_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmul_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmul_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmul_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmul_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmul_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmul_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmul_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmul_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmul_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t 
test_vmul_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmul_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmul_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmul_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmul_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmul_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmul_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmul_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmul_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmul_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmul_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmul_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmul_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmul_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmul_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmul_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmul_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmul_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmul_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { 
+vint16m1_t test_vmul_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmul_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmul_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmul_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmul_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmul_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmul_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmul_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmul_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmul_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmul_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmul_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmul_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmul_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmul_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmul_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmul_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmul_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmul_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t 
test_vmul_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmul_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmul_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmul_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmul_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmul_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmul_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmul_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmul_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmul_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmul_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmul_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmul_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmul_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmul_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmul_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmul_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmul_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmul_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmul_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmul_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmul_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmul_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmul_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmul_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vmul_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return 
__riscv_vmul_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vmul_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vmul_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vmul_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vmul_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vmul_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vmul_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vmul_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vmul_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vmul_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vmul_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vmul_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmul_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vmul_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vmul_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vmul_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vmul_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vmul_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vmul_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vmul_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t 
vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vmul_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmul_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmul_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vmul_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmul_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vmul_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vmul_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmul_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vmul_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vmul_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vmul_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vmul_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vmul_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vmul_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vmul_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vmul_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vmul_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vmul_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vmul_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vmul_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return 
__riscv_vmul_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vmul_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vmul_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmul_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vmul_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vmul_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vmul_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vmul_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vmul_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vmul_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vmul_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vmul_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vmul_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vmul_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vmul_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vmul_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vmul_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmul_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t 
test_vmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vmul_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmul_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vmul_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmul_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vmul_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmul_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmul_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmul_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmul_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmul_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmul_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmul_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmul_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmul_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmul_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmul_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmul_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmul_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmul_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmul_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmul_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmul_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmul_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return 
__riscv_vmul_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmul_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmul_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmul_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmul_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmul_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmul_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmul_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmul_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmul_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmul_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmul_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmul_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmul_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmul_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmul_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmul_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmul_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmul_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmul_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmul_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmul_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmul_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmul_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmul_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmul_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmul_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmul_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmul_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmul_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmul_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmul_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmul_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t 
test_vmul_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmul_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmul_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmul_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmul_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmul_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmul_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmul_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmul_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmul_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmul_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmul_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmul_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmul_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmul_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmul_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmul_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmul_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmul_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmul_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmul_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmul_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmul_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmul_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmul_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmul_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmul_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmul_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmul_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmul_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmul_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmul_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmul_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t 
test_vmul_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmul_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmul_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmul_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmul_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmul_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmul_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmul_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmul_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmul_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmul_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmul_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmul_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmul_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmul_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmul_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmul_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmul_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmul_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmul_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmul_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmul_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmul_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmul_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmul_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmul_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmul_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmul_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmul_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmul_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmul_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmul_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vmul_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vmul_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vmul_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vmul_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vmul_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t 
test_vmul_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vmul_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vmul_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vmul_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vmul_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vmul_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vmul_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vmul_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vmul_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vmul_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vmul_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vmul_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vmul_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vmul_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vmul_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vmul_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vmul_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmul_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vmul_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vmul_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vmul_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vmul_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vmul_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vmul_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vmul_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vmul_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vmul_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vmul_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vmul_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vmul_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmul_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vmul_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vmul_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmul_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t 
test_vmul_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vmul_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmul_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vmul_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmul_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vmul_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmul_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vmul_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vmul_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmul_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vmul_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmul_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vmul_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vmul_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmul_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vmul_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmul_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vmul_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vmul_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmul_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vmul_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmul_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vmul_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vmul_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmul_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vmul_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmul_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmul_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vmul_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmul_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmul_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vmul_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t 
vd, + vuint32mf2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmul_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vmul_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vmul_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmul_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vmul_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmul_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vmul_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vmul_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmul_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vmul_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmul_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vmul_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vmul_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmul_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vmul_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmul_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vmul_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vmul_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmul_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vmul_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmul_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vmul_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmul_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vmul_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmul_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t 
vs1, size_t vl) { +vuint64m4_t test_vmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vmul_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmul_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vmul_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vmul_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmulh.c b/auto-generated/policy_funcs/llvm-api-tests/vmulh.c index 51bab4943..4e947a571 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmulh.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmulh.c @@ -5,706 +5,891 @@ #include <riscv_vector.h> -vint8mf8_t test_vmulh_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmulh_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vmulh_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vmulh_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmulh_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmulh_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vmulh_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmulh_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vmulh_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vmulh_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmulh_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmulh_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vmulh_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmulh_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vmulh_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vmulh_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmulh_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmulh_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vmulh_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmulh_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vmulh_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmulh_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmulh_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmulh_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmulh_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vmulh_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmulh_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+vint8m2_t test_vmulh_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmulh_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmulh_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmulh_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vmulh_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmulh_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmulh_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmulh_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmulh_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmulh_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vmulh_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmulh_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmulh_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vmulh_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmulh_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vmulh_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmulh_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmulh_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vmulh_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmulh_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmulh_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vmulh_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmulh_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmulh_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vmulh_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmulh_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vmulh_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vmulh_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmulh_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmulh_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vmulh_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmulh_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vmulh_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmulh_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmulh_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t 
test_vmulh_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmulh_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vmulh_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmulh_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vmulh_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vmulh_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmulh_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vmulh_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmulh_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmulh_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vmulh_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmulh_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmulh_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmulh_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vmulh_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmulh_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmulh_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vmulh_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmulh_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vmulh_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmulh_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmulh_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmulh_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vmulh_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmulh_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vmulh_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmulh_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vmulh_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmulh_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmulh_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmulh_vv_i64m2_tu(vint64m2_t vd, 
vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vmulh_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmulh_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmulh_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmulh_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmulh_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vmulh_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmulh_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmulh_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmulh_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmulh_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vmulh_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmulh_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vmulh_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmulh_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmulh_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmulh_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulh_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmulh_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmulh_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmulh_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmulh_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulh_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmulh_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulh_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmulh_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmulh_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulh_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmulh_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulh_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmulh_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmulh_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmulh_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmulh_vv_i8m2_tum(vbool4_t vm, 
vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmulh_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmulh_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmulh_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmulh_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmulh_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmulh_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmulh_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmulh_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmulh_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmulh_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmulh_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmulh_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmulh_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmulh_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmulh_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmulh_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmulh_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmulh_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmulh_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmulh_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmulh_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmulh_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmulh_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmulh_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmulh_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmulh_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t 
test_vmulh_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmulh_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmulh_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmulh_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmulh_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmulh_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmulh_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmulh_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmulh_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmulh_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmulh_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmulh_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmulh_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmulh_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmulh_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmulh_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmulh_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmulh_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmulh_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmulh_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmulh_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmulh_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmulh_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmulh_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return 
__riscv_vmulh_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmulh_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmulh_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmulh_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmulh_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmulh_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmulh_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmulh_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmulh_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmulh_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmulh_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmulh_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmulh_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmulh_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmulh_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmulh_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmulh_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmulh_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmulh_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmulh_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmulh_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmulh_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmulh_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmulh_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmulh_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulh_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmulh_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmulh_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmulh_vv_i8mf4_tumu(vbool32_t vm, 
vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmulh_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulh_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmulh_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulh_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmulh_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmulh_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulh_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmulh_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulh_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmulh_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmulh_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmulh_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmulh_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmulh_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmulh_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmulh_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmulh_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmulh_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmulh_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmulh_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmulh_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmulh_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmulh_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmulh_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmulh_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmulh_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmulh_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmulh_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmulh_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t 
test_vmulh_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmulh_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmulh_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmulh_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmulh_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmulh_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmulh_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmulh_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmulh_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmulh_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmulh_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmulh_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmulh_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmulh_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmulh_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmulh_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmulh_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmulh_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmulh_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmulh_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmulh_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmulh_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmulh_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmulh_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32mf2_tumu(vm, vd, vs2, 
rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmulh_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmulh_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmulh_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmulh_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmulh_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmulh_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmulh_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmulh_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmulh_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmulh_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmulh_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmulh_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmulh_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmulh_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmulh_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmulh_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmulh_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmulh_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmulh_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmulh_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmulh_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmulh_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmulh_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, 
vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmulh_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmulh_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmulh_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmulh_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmulh_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmulh_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmulh_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmulh_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vmulh_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmulh_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vmulh_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulh_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vmulh_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vmulh_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmulh_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vmulh_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulh_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vmulh_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulh_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmulh_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vmulh_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulh_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vmulh_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulh_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vmulh_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vmulh_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulh_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vmulh_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmulh_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vmulh_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vmulh_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmulh_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vmulh_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + 
int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmulh_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vmulh_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vmulh_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmulh_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vmulh_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmulh_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vmulh_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vmulh_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmulh_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vmulh_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vmulh_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmulh_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmulh_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vmulh_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vmulh_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vmulh_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vmulh_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmulh_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vmulh_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vmulh_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vmulh_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vmulh_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vmulh_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vmulh_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vmulh_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vmulh_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vmulh_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vmulh_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vmulh_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vmulh_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vmulh_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vmulh_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vmulh_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t 
vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vmulh_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vmulh_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vmulh_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vmulh_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vmulh_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vmulh_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vmulh_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vmulh_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vmulh_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vmulh_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmulh_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vmulh_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vmulh_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vmulh_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vmulh_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vmulh_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vmulh_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vmulh_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vmulh_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vmulh_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vmulh_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vmulh_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vmulh_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vmulh_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vmulh_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vmulh_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vmulh_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vmulh_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vmulh_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vmulh_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vmulh_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vmulh_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vmulh_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vmulh_vx_i32m8_mu(vbool4_t 
vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vmulh_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vmulh_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vmulh_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vmulh_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vmulh_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vmulh_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmulh_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vmulh_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vmulh_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vmulh_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vmulh_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vmulh_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmulh_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vmulh_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vmulh_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vmulh_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vmulh_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vmulh_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmulh_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vmulh_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vmulh_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vmulh_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vmulh_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vmulh_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vmulh_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmulhsu.c b/auto-generated/policy_funcs/llvm-api-tests/vmulhsu.c index 0380bbc60..f6636ff5a 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmulhsu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmulhsu.c @@ -5,706 +5,924 @@ #include <riscv_vector.h> -vint8mf8_t test_vmulhsu_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmulhsu_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vmulhsu_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { +vint8mf8_t test_vmulhsu_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vmulhsu_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmulhsu_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vmulhsu_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { +vint8mf4_t test_vmulhsu_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, +
size_t vl) { return __riscv_vmulhsu_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vmulhsu_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmulhsu_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vmulhsu_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { +vint8mf2_t test_vmulhsu_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vmulhsu_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vmulhsu_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vmulhsu_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vmulhsu_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { +vint8m1_t test_vmulhsu_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vmulhsu_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vmulhsu_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vmulhsu_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vmulhsu_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { +vint8m2_t test_vmulhsu_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vmulhsu_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vmulhsu_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vmulhsu_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vmulhsu_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { +vint8m4_t test_vmulhsu_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vmulhsu_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vmulhsu_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vmulhsu_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vmulhsu_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, size_t vl) { +vint8m8_t test_vmulhsu_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vmulhsu_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmulhsu_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vmulhsu_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { +vint16mf4_t test_vmulhsu_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmulhsu_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vmulhsu_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vmulhsu_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vmulhsu_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { +vint16mf2_t test_vmulhsu_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vmulhsu_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t 
test_vmulhsu_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vmulhsu_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vmulhsu_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { +vint16m1_t test_vmulhsu_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vmulhsu_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vmulhsu_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vmulhsu_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { +vint16m2_t test_vmulhsu_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vmulhsu_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vmulhsu_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vmulhsu_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { +vint16m4_t test_vmulhsu_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vmulhsu_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vmulhsu_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vmulhsu_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, size_t vl) { +vint16m8_t test_vmulhsu_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vmulhsu_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vmulhsu_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { +vint32mf2_t test_vmulhsu_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vmulhsu_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vmulhsu_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vmulhsu_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vmulhsu_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) { +vint32m1_t test_vmulhsu_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vmulhsu_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vmulhsu_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vmulhsu_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { +vint32m2_t test_vmulhsu_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t 
test_vmulhsu_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vmulhsu_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vmulhsu_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { +vint32m4_t test_vmulhsu_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vmulhsu_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vmulhsu_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vmulhsu_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, size_t vl) { +vint32m8_t test_vmulhsu_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vmulhsu_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vmulhsu_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vmulhsu_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, size_t vl) { +vint64m1_t test_vmulhsu_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vmulhsu_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vmulhsu_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vmulhsu_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, size_t vl) { +vint64m2_t test_vmulhsu_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vmulhsu_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vmulhsu_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vmulhsu_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, size_t vl) { +vint64m4_t test_vmulhsu_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vmulhsu_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vmulhsu_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vmulhsu_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, size_t vl) { +vint64m8_t test_vmulhsu_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vmulhsu_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vmulhsu_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vmulhsu_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vmulhsu_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vmulhsu_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { +vint8mf8_t test_vmulhsu_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmulhsu_vx_i8mf8_tum(vm, vd, 
vs2, rs1, vl); } -vint8mf4_t test_vmulhsu_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vmulhsu_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vmulhsu_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vmulhsu_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { +vint8mf4_t test_vmulhsu_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmulhsu_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vmulhsu_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vmulhsu_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vmulhsu_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vmulhsu_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { +vint8mf2_t test_vmulhsu_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vmulhsu_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vmulhsu_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vmulhsu_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vmulhsu_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { +vint8m1_t test_vmulhsu_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmulhsu_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vmulhsu_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vmulhsu_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vmulhsu_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { +vint8m2_t test_vmulhsu_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmulhsu_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vmulhsu_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vmulhsu_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vmulhsu_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { +vint8m4_t test_vmulhsu_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmulhsu_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vmulhsu_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vmulhsu_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vmulhsu_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vmulhsu_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, size_t vl) { +vint8m8_t test_vmulhsu_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vmulhsu_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vmulhsu_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vmulhsu_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, 
vuint16mf4_t vs1,
+                                       size_t vl) {
   return __riscv_vmulhsu_vv_i16mf4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vmulhsu_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+vint16mf4_t test_vmulhsu_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd,
+                                       vint16mf4_t vs2, uint16_t rs1,
+                                       size_t vl) {
   return __riscv_vmulhsu_vx_i16mf4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vmulhsu_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+vint16mf2_t test_vmulhsu_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd,
+                                       vint16mf2_t vs2, vuint16mf2_t vs1,
+                                       size_t vl) {
   return __riscv_vmulhsu_vv_i16mf2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vmulhsu_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+vint16mf2_t test_vmulhsu_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd,
+                                       vint16mf2_t vs2, uint16_t rs1,
+                                       size_t vl) {
   return __riscv_vmulhsu_vx_i16mf2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vmulhsu_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+vint16m1_t test_vmulhsu_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd,
+                                     vint16m1_t vs2, vuint16m1_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhsu_vv_i16m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vmulhsu_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) {
+vint16m1_t test_vmulhsu_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd,
+                                     vint16m1_t vs2, uint16_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i16m1_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m2_t test_vmulhsu_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+vint16m2_t test_vmulhsu_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                     vuint16m2_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i16m2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vmulhsu_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) {
+vint16m2_t test_vmulhsu_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                     uint16_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i16m2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m4_t test_vmulhsu_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+vint16m4_t test_vmulhsu_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                     vuint16m4_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i16m4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vmulhsu_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) {
+vint16m4_t test_vmulhsu_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                     uint16_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i16m4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint16m8_t test_vmulhsu_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+vint16m8_t test_vmulhsu_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                     vuint16m8_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i16m8_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vmulhsu_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, size_t vl) {
+vint16m8_t test_vmulhsu_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                     uint16_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i16m8_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vmulhsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+vint32mf2_t test_vmulhsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                       vint32mf2_t vs2, vuint32mf2_t vs1,
+                                       size_t vl) {
   return __riscv_vmulhsu_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vmulhsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+vint32mf2_t test_vmulhsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                       vint32mf2_t vs2, uint32_t rs1,
+                                       size_t vl) {
   return __riscv_vmulhsu_vx_i32mf2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m1_t test_vmulhsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+vint32m1_t test_vmulhsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd,
+                                     vint32m1_t vs2, vuint32m1_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhsu_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vmulhsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) {
+vint32m1_t test_vmulhsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd,
+                                     vint32m1_t vs2, uint32_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i32m1_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m2_t test_vmulhsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+vint32m2_t test_vmulhsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd,
+                                     vint32m2_t vs2, vuint32m2_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhsu_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m2_t test_vmulhsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) {
+vint32m2_t test_vmulhsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd,
+                                     vint32m2_t vs2, uint32_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i32m2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m4_t test_vmulhsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+vint32m4_t test_vmulhsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                     vuint32m4_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m4_t test_vmulhsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+vint32m4_t test_vmulhsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                     uint32_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i32m4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint32m8_t test_vmulhsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+vint32m8_t test_vmulhsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                     vuint32m8_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint32m8_t test_vmulhsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, size_t vl) {
+vint32m8_t test_vmulhsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                     uint32_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i32m8_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m1_t test_vmulhsu_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+vint64m1_t test_vmulhsu_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd,
+                                     vint64m1_t vs2, vuint64m1_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhsu_vv_i64m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vmulhsu_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, size_t vl) {
+vint64m1_t test_vmulhsu_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd,
+                                     vint64m1_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i64m1_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m2_t test_vmulhsu_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+vint64m2_t test_vmulhsu_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd,
+                                     vint64m2_t vs2, vuint64m2_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhsu_vv_i64m2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vmulhsu_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, size_t vl) {
+vint64m2_t test_vmulhsu_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd,
+                                     vint64m2_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i64m2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vmulhsu_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+vint64m4_t test_vmulhsu_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd,
+                                     vint64m4_t vs2, vuint64m4_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhsu_vv_i64m4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vmulhsu_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, size_t vl) {
+vint64m4_t test_vmulhsu_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd,
+                                     vint64m4_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i64m4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vmulhsu_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+vint64m8_t test_vmulhsu_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                     vuint64m8_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i64m8_tum(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vmulhsu_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, size_t vl) {
+vint64m8_t test_vmulhsu_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                     uint64_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i64m8_tum(vm, vd, vs2, rs1, vl);
 }
 
-vint8mf8_t test_vmulhsu_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+vint8mf8_t test_vmulhsu_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+                                      vint8mf8_t vs2, vuint8mf8_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vmulhsu_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) {
+vint8mf8_t test_vmulhsu_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+                                      vint8mf8_t vs2, uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vmulhsu_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+vint8mf4_t test_vmulhsu_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+                                      vint8mf4_t vs2, vuint8mf4_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vmulhsu_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) {
+vint8mf4_t test_vmulhsu_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+                                      vint8mf4_t vs2, uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vmulhsu_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+vint8mf2_t test_vmulhsu_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+                                      vint8mf2_t vs2, vuint8mf2_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vmulhsu_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) {
+vint8mf2_t test_vmulhsu_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+                                      vint8mf2_t vs2, uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8m1_t test_vmulhsu_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+vint8m1_t test_vmulhsu_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                    vuint8m1_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i8m1_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vmulhsu_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) {
+vint8m1_t test_vmulhsu_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8m1_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8m2_t test_vmulhsu_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+vint8m2_t test_vmulhsu_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                    vuint8m2_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i8m2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8m2_t test_vmulhsu_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) {
+vint8m2_t test_vmulhsu_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8m2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8m4_t test_vmulhsu_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+vint8m4_t test_vmulhsu_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                    vuint8m4_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i8m4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8m4_t test_vmulhsu_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) {
+vint8m4_t test_vmulhsu_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8m4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8m8_t test_vmulhsu_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+vint8m8_t test_vmulhsu_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                    vuint8m8_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i8m8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint8m8_t test_vmulhsu_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, size_t vl) {
+vint8m8_t test_vmulhsu_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8m8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vmulhsu_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+vint16mf4_t test_vmulhsu_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd,
+                                        vint16mf4_t vs2, vuint16mf4_t vs1,
+                                        size_t vl) {
   return __riscv_vmulhsu_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vmulhsu_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+vint16mf4_t test_vmulhsu_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd,
+                                        vint16mf4_t vs2, uint16_t rs1,
+                                        size_t vl) {
   return __riscv_vmulhsu_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vmulhsu_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+vint16mf2_t test_vmulhsu_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd,
+                                        vint16mf2_t vs2, vuint16mf2_t vs1,
+                                        size_t vl) {
   return __riscv_vmulhsu_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vmulhsu_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+vint16mf2_t test_vmulhsu_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd,
+                                        vint16mf2_t vs2, uint16_t rs1,
+                                        size_t vl) {
   return __riscv_vmulhsu_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vmulhsu_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+vint16m1_t test_vmulhsu_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd,
+                                      vint16m1_t vs2, vuint16m1_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vmulhsu_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) {
+vint16m1_t test_vmulhsu_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd,
+                                      vint16m1_t vs2, uint16_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i16m1_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m2_t test_vmulhsu_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+vint16m2_t test_vmulhsu_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd,
+                                      vint16m2_t vs2, vuint16m2_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vmulhsu_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) {
+vint16m2_t test_vmulhsu_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd,
+                                      vint16m2_t vs2, uint16_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i16m2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m4_t test_vmulhsu_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+vint16m4_t test_vmulhsu_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd,
+                                      vint16m4_t vs2, vuint16m4_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vmulhsu_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) {
+vint16m4_t test_vmulhsu_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd,
+                                      vint16m4_t vs2, uint16_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i16m4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m8_t test_vmulhsu_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+vint16m8_t test_vmulhsu_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd,
+                                      vint16m8_t vs2, vuint16m8_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vmulhsu_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, size_t vl) {
+vint16m8_t test_vmulhsu_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd,
+                                      vint16m8_t vs2, uint16_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i16m8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vmulhsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+vint32mf2_t test_vmulhsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                        vint32mf2_t vs2, vuint32mf2_t vs1,
+                                        size_t vl) {
   return __riscv_vmulhsu_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+vint32mf2_t test_vmulhsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                        vint32mf2_t vs2, uint32_t rs1,
+                                        size_t vl) {
   return __riscv_vmulhsu_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m1_t test_vmulhsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+vint32m1_t test_vmulhsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+                                      vint32m1_t vs2, vuint32m1_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vmulhsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) {
+vint32m1_t test_vmulhsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+                                      vint32m1_t vs2, uint32_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i32m1_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m2_t test_vmulhsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+vint32m2_t test_vmulhsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+                                      vint32m2_t vs2, vuint32m2_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m2_t test_vmulhsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) {
+vint32m2_t test_vmulhsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+                                      vint32m2_t vs2, uint32_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i32m2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m4_t test_vmulhsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+vint32m4_t test_vmulhsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd,
+                                      vint32m4_t vs2, vuint32m4_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m4_t test_vmulhsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+vint32m4_t test_vmulhsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd,
+                                      vint32m4_t vs2, uint32_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i32m4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m8_t test_vmulhsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+vint32m8_t test_vmulhsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd,
+                                      vint32m8_t vs2, vuint32m8_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m8_t test_vmulhsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, size_t vl) {
+vint32m8_t test_vmulhsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd,
+                                      vint32m8_t vs2, uint32_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i32m8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m1_t test_vmulhsu_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+vint64m1_t test_vmulhsu_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd,
+                                      vint64m1_t vs2, vuint64m1_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vmulhsu_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, size_t vl) {
+vint64m1_t test_vmulhsu_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd,
+                                      vint64m1_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i64m1_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m2_t test_vmulhsu_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+vint64m2_t test_vmulhsu_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd,
+                                      vint64m2_t vs2, vuint64m2_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vmulhsu_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, size_t vl) {
+vint64m2_t test_vmulhsu_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd,
+                                      vint64m2_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i64m2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vmulhsu_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+vint64m4_t test_vmulhsu_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd,
+                                      vint64m4_t vs2, vuint64m4_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vmulhsu_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, size_t vl) {
+vint64m4_t test_vmulhsu_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd,
+                                      vint64m4_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i64m4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vmulhsu_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+vint64m8_t test_vmulhsu_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd,
+                                      vint64m8_t vs2, vuint64m8_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vmulhsu_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, size_t vl) {
+vint64m8_t test_vmulhsu_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd,
+                                      vint64m8_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i64m8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vint8mf8_t test_vmulhsu_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+vint8mf8_t test_vmulhsu_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2,
+                                    vuint8mf8_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i8mf8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vmulhsu_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) {
+vint8mf8_t test_vmulhsu_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8mf8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vmulhsu_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+vint8mf4_t test_vmulhsu_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2,
+                                    vuint8mf4_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i8mf4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vmulhsu_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) {
+vint8mf4_t test_vmulhsu_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8mf4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vmulhsu_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+vint8mf2_t test_vmulhsu_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2,
+                                    vuint8mf2_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i8mf2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vmulhsu_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) {
+vint8mf2_t test_vmulhsu_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8mf2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint8m1_t test_vmulhsu_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+vint8m1_t test_vmulhsu_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                  vuint8m1_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i8m1_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vmulhsu_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) {
+vint8m1_t test_vmulhsu_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                  uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8m1_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint8m2_t test_vmulhsu_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+vint8m2_t test_vmulhsu_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                  vuint8m2_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i8m2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint8m2_t test_vmulhsu_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) {
+vint8m2_t test_vmulhsu_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                  uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8m2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint8m4_t test_vmulhsu_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+vint8m4_t test_vmulhsu_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                  vuint8m4_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i8m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint8m4_t test_vmulhsu_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) {
+vint8m4_t test_vmulhsu_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                  uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8m4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint8m8_t test_vmulhsu_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+vint8m8_t test_vmulhsu_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                  vuint8m8_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i8m8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint8m8_t test_vmulhsu_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, uint8_t rs1, size_t vl) {
+vint8m8_t test_vmulhsu_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                  uint8_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i8m8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf4_t test_vmulhsu_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+vint16mf4_t test_vmulhsu_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd,
+                                      vint16mf4_t vs2, vuint16mf4_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i16mf4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf4_t test_vmulhsu_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) {
+vint16mf4_t test_vmulhsu_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd,
+                                      vint16mf4_t vs2, uint16_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vx_i16mf4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint16mf2_t test_vmulhsu_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+vint16mf2_t test_vmulhsu_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd,
+                                      vint16mf2_t vs2, vuint16mf2_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i16mf2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint16mf2_t test_vmulhsu_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) {
+vint16mf2_t test_vmulhsu_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd,
+                                      vint16mf2_t vs2, uint16_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vx_i16mf2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m1_t test_vmulhsu_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+vint16m1_t test_vmulhsu_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2,
+                                    vuint16m1_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i16m1_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m1_t test_vmulhsu_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) {
+vint16m1_t test_vmulhsu_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2,
+                                    uint16_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i16m1_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m2_t test_vmulhsu_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+vint16m2_t test_vmulhsu_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                    vuint16m2_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i16m2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m2_t test_vmulhsu_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) {
+vint16m2_t test_vmulhsu_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                    uint16_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i16m2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m4_t test_vmulhsu_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+vint16m4_t test_vmulhsu_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                    vuint16m4_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i16m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m4_t test_vmulhsu_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) {
+vint16m4_t test_vmulhsu_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                    uint16_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i16m4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint16m8_t test_vmulhsu_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+vint16m8_t test_vmulhsu_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                    vuint16m8_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i16m8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint16m8_t test_vmulhsu_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, uint16_t rs1, size_t vl) {
+vint16m8_t test_vmulhsu_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                    uint16_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i16m8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint32mf2_t test_vmulhsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+vint32mf2_t test_vmulhsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                      vint32mf2_t vs2, vuint32mf2_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vv_i32mf2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint32mf2_t test_vmulhsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) {
+vint32mf2_t test_vmulhsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                      vint32mf2_t vs2, uint32_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhsu_vx_i32mf2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m1_t test_vmulhsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+vint32m1_t test_vmulhsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
+                                    vuint32m1_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m1_t test_vmulhsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) {
+vint32m1_t test_vmulhsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
+                                    uint32_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i32m1_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m2_t test_vmulhsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+vint32m2_t test_vmulhsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2,
+                                    vuint32m2_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m2_t test_vmulhsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) {
+vint32m2_t test_vmulhsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2,
+                                    uint32_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i32m2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m4_t test_vmulhsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+vint32m4_t test_vmulhsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                    vuint32m4_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m4_t test_vmulhsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) {
+vint32m4_t test_vmulhsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                    uint32_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i32m4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint32m8_t test_vmulhsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+vint32m8_t test_vmulhsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                    vuint32m8_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint32m8_t test_vmulhsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, uint32_t rs1, size_t vl) {
+vint32m8_t test_vmulhsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                    uint32_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i32m8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m1_t test_vmulhsu_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+vint64m1_t test_vmulhsu_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2,
+                                    vuint64m1_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i64m1_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m1_t test_vmulhsu_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, uint64_t rs1, size_t vl) {
+vint64m1_t test_vmulhsu_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2,
+                                    uint64_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i64m1_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m2_t test_vmulhsu_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+vint64m2_t test_vmulhsu_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2,
+                                    vuint64m2_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m2_t test_vmulhsu_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, uint64_t rs1, size_t vl) {
+vint64m2_t test_vmulhsu_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2,
+                                    uint64_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i64m2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m4_t test_vmulhsu_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+vint64m4_t test_vmulhsu_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2,
+                                    vuint64m4_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m4_t test_vmulhsu_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, uint64_t rs1, size_t vl) {
+vint64m4_t test_vmulhsu_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2,
+                                    uint64_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i64m4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vint64m8_t test_vmulhsu_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+vint64m8_t test_vmulhsu_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                    vuint64m8_t vs1, size_t vl) {
   return __riscv_vmulhsu_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vint64m8_t test_vmulhsu_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, uint64_t rs1, size_t vl) {
+vint64m8_t test_vmulhsu_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                    uint64_t rs1, size_t vl) {
   return __riscv_vmulhsu_vx_i64m8_mu(vm, vd, vs2, rs1, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmulhu.c b/auto-generated/policy_funcs/llvm-api-tests/vmulhu.c
index 7ec40fbbe..3cefc3a1e 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vmulhu.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vmulhu.c
@@ -5,706 +5,957 @@
 #include <riscv_vector.h>
 
-vuint8mf8_t test_vmulhu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+vuint8mf8_t test_vmulhu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2,
+                                    vuint8mf8_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8mf8_tu(vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vmulhu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf8_t test_vmulhu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8mf8_tu(vd, vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vmulhu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+vuint8mf4_t test_vmulhu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2,
+                                    vuint8mf4_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8mf4_tu(vd, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vmulhu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf4_t test_vmulhu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8mf4_tu(vd, vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vmulhu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+vuint8mf2_t test_vmulhu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2,
+                                    vuint8mf2_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8mf2_tu(vd, vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vmulhu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf2_t test_vmulhu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8mf2_tu(vd, vs2, rs1, vl);
 }
 
-vuint8m1_t test_vmulhu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+vuint8m1_t test_vmulhu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
+                                  size_t vl) {
   return __riscv_vmulhu_vv_u8m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vmulhu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+vuint8m1_t test_vmulhu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1,
+                                  size_t vl) {
   return __riscv_vmulhu_vx_u8m1_tu(vd, vs2, rs1, vl);
 }
 
-vuint8m2_t test_vmulhu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+vuint8m2_t test_vmulhu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
+                                  size_t vl) {
   return __riscv_vmulhu_vv_u8m2_tu(vd, vs2, vs1, vl);
 }
 
-vuint8m2_t test_vmulhu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+vuint8m2_t test_vmulhu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1,
+                                  size_t vl) {
   return __riscv_vmulhu_vx_u8m2_tu(vd, vs2, rs1, vl);
 }
 
-vuint8m4_t test_vmulhu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+vuint8m4_t test_vmulhu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
+                                  size_t vl) {
   return __riscv_vmulhu_vv_u8m4_tu(vd, vs2, vs1, vl);
 }
 
-vuint8m4_t test_vmulhu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+vuint8m4_t test_vmulhu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1,
+                                  size_t vl) {
   return __riscv_vmulhu_vx_u8m4_tu(vd, vs2, rs1, vl);
 }
 
-vuint8m8_t test_vmulhu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+vuint8m8_t test_vmulhu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1,
+                                  size_t vl) {
   return __riscv_vmulhu_vv_u8m8_tu(vd, vs2, vs1, vl);
 }
 
-vuint8m8_t test_vmulhu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+vuint8m8_t test_vmulhu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1,
+                                  size_t vl) {
   return __riscv_vmulhu_vx_u8m8_tu(vd, vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vmulhu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+vuint16mf4_t test_vmulhu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                                      vuint16mf4_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u16mf4_tu(vd, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vmulhu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+vuint16mf4_t test_vmulhu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                                      uint16_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u16mf4_tu(vd, vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vmulhu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+vuint16mf2_t test_vmulhu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                                      vuint16mf2_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u16mf2_tu(vd, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vmulhu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+vuint16mf2_t test_vmulhu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                                      uint16_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u16mf2_tu(vd, vs2, rs1, vl);
 }
 
-vuint16m1_t test_vmulhu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+vuint16m1_t test_vmulhu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
+                                    vuint16m1_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u16m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vmulhu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+vuint16m1_t test_vmulhu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
+                                    uint16_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u16m1_tu(vd, vs2, rs1, vl);
 }
 
-vuint16m2_t test_vmulhu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+vuint16m2_t test_vmulhu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2,
+                                    vuint16m2_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u16m2_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m2_t test_vmulhu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+vuint16m2_t test_vmulhu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2,
+                                    uint16_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u16m2_tu(vd, vs2, rs1, vl);
 }
 
-vuint16m4_t test_vmulhu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+vuint16m4_t test_vmulhu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2,
+                                    vuint16m4_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u16m4_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m4_t test_vmulhu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+vuint16m4_t test_vmulhu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2,
+                                    uint16_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u16m4_tu(vd, vs2, rs1, vl);
 }
 
-vuint16m8_t test_vmulhu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+vuint16m8_t test_vmulhu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2,
+                                    vuint16m8_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u16m8_tu(vd, vs2, vs1, vl);
 }
 
-vuint16m8_t test_vmulhu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+vuint16m8_t test_vmulhu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2,
+                                    uint16_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u16m8_tu(vd, vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vmulhu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+vuint32mf2_t test_vmulhu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                      vuint32mf2_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u32mf2_tu(vd, vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vmulhu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+vuint32mf2_t test_vmulhu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                      uint32_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u32mf2_tu(vd, vs2, rs1, vl);
 }
 
-vuint32m1_t test_vmulhu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+vuint32m1_t test_vmulhu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                    vuint32m1_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u32m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vmulhu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+vuint32m1_t test_vmulhu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                    uint32_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u32m1_tu(vd, vs2, rs1, vl);
 }
 
-vuint32m2_t test_vmulhu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+vuint32m2_t test_vmulhu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                    vuint32m2_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u32m2_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m2_t test_vmulhu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+vuint32m2_t test_vmulhu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                    uint32_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u32m2_tu(vd, vs2, rs1, vl);
 }
 
-vuint32m4_t test_vmulhu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+vuint32m4_t test_vmulhu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                    vuint32m4_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u32m4_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m4_t test_vmulhu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+vuint32m4_t test_vmulhu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                    uint32_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u32m4_tu(vd, vs2, rs1, vl);
 }
 
-vuint32m8_t test_vmulhu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+vuint32m8_t test_vmulhu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                    vuint32m8_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u32m8_tu(vd, vs2, vs1, vl);
 }
 
-vuint32m8_t test_vmulhu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+vuint32m8_t test_vmulhu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                    uint32_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u32m8_tu(vd, vs2, rs1, vl);
 }
 
-vuint64m1_t test_vmulhu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+vuint64m1_t test_vmulhu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
+                                    vuint64m1_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u64m1_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vmulhu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+vuint64m1_t test_vmulhu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
+                                    uint64_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u64m1_tu(vd, vs2, rs1, vl);
 }
 
-vuint64m2_t test_vmulhu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+vuint64m2_t test_vmulhu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2,
+                                    vuint64m2_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u64m2_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m2_t test_vmulhu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+vuint64m2_t test_vmulhu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2,
+                                    uint64_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u64m2_tu(vd, vs2, rs1, vl);
 }
 
-vuint64m4_t test_vmulhu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+vuint64m4_t test_vmulhu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2,
+                                    vuint64m4_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u64m4_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vmulhu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+vuint64m4_t test_vmulhu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2,
+                                    uint64_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u64m4_tu(vd, vs2, rs1, vl);
 }
 
-vuint64m8_t test_vmulhu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+vuint64m8_t test_vmulhu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2,
+                                    vuint64m8_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u64m8_tu(vd, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vmulhu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+vuint64m8_t test_vmulhu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2,
+                                    uint64_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u64m8_tu(vd, vs2, rs1, vl);
 }
 
-vuint8mf8_t test_vmulhu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+vuint8mf8_t test_vmulhu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vmulhu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf8_t test_vmulhu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vmulhu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+vuint8mf4_t test_vmulhu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vmulhu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf4_t test_vmulhu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vmulhu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+vuint8mf2_t test_vmulhu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vmulhu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf2_t test_vmulhu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m1_t test_vmulhu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+vuint8m1_t test_vmulhu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                   vuint8m1_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vmulhu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+vuint8m1_t test_vmulhu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                   uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8m1_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m2_t test_vmulhu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+vuint8m2_t test_vmulhu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                   vuint8m2_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m2_t test_vmulhu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+vuint8m2_t test_vmulhu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                   uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8m2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m4_t test_vmulhu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+vuint8m4_t test_vmulhu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                   vuint8m4_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m4_t test_vmulhu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+vuint8m4_t test_vmulhu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                   uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8m4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m8_t test_vmulhu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+vuint8m8_t test_vmulhu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                   vuint8m8_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m8_t test_vmulhu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+vuint8m8_t test_vmulhu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                   uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8m8_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vmulhu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+vuint16mf4_t test_vmulhu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                       size_t vl) {
   return __riscv_vmulhu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vmulhu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+vuint16mf4_t test_vmulhu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf4_t vs2, uint16_t rs1,
+                                       size_t vl) {
   return __riscv_vmulhu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vmulhu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+vuint16mf2_t test_vmulhu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                       size_t vl) {
   return __riscv_vmulhu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vmulhu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+vuint16mf2_t test_vmulhu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16mf2_t vs2, uint16_t rs1,
+                                       size_t vl) {
   return __riscv_vmulhu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m1_t test_vmulhu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+vuint16m1_t test_vmulhu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m1_t vs2, vuint16m1_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vmulhu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+vuint16m1_t test_vmulhu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m1_t vs2, uint16_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u16m1_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m2_t test_vmulhu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+vuint16m2_t test_vmulhu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m2_t vs2, vuint16m2_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m2_t test_vmulhu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+vuint16m2_t test_vmulhu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m2_t vs2, uint16_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u16m2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m4_t test_vmulhu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+vuint16m4_t test_vmulhu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m4_t vs2, vuint16m4_t vs1,
+                                     size_t vl) {
  return __riscv_vmulhu_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m4_t test_vmulhu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+vuint16m4_t test_vmulhu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m4_t vs2, uint16_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u16m4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m8_t test_vmulhu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+vuint16m8_t test_vmulhu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                     vuint16m8_t vs2, vuint16m8_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m8_t test_vmulhu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+vuint16m8_t test_vmulhu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                     vuint16m8_t vs2, uint16_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u16m8_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vmulhu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+vuint32mf2_t test_vmulhu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                       size_t vl) {
   return __riscv_vmulhu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vmulhu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+vuint32mf2_t test_vmulhu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint32mf2_t vs2, uint32_t rs1,
+                                       size_t vl) {
   return __riscv_vmulhu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m1_t test_vmulhu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+vuint32m1_t test_vmulhu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m1_t vs2, vuint32m1_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vmulhu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+vuint32m1_t test_vmulhu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m1_t vs2, uint32_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u32m1_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m2_t test_vmulhu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+vuint32m2_t test_vmulhu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vuint32m2_t vs2, vuint32m2_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m2_t test_vmulhu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+vuint32m2_t test_vmulhu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vuint32m2_t vs2, uint32_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u32m2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m4_t test_vmulhu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+vuint32m4_t test_vmulhu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vuint32m4_t vs2, vuint32m4_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m4_t test_vmulhu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+vuint32m4_t test_vmulhu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vuint32m4_t vs2, uint32_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u32m4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m8_t test_vmulhu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+vuint32m8_t test_vmulhu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                     vuint32m8_t vs2, vuint32m8_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m8_t test_vmulhu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+vuint32m8_t test_vmulhu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                     vuint32m8_t vs2, uint32_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u32m8_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m1_t test_vmulhu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+vuint64m1_t test_vmulhu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                     vuint64m1_t vs2, vuint64m1_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vmulhu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+vuint64m1_t test_vmulhu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                     vuint64m1_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u64m1_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m2_t test_vmulhu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+vuint64m2_t test_vmulhu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                     vuint64m2_t vs2, vuint64m2_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u64m2_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m2_t test_vmulhu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+vuint64m2_t test_vmulhu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                     vuint64m2_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u64m2_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m4_t test_vmulhu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+vuint64m4_t test_vmulhu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m4_t vs2, vuint64m4_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u64m4_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vmulhu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+vuint64m4_t test_vmulhu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m4_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u64m4_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m8_t test_vmulhu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+vuint64m8_t test_vmulhu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd,
+                                     vuint64m8_t vs2, vuint64m8_t vs1,
+                                     size_t vl) {
   return __riscv_vmulhu_vv_u64m8_tum(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vmulhu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+vuint64m8_t test_vmulhu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd,
+                                     vuint64m8_t vs2, uint64_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u64m8_tum(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf8_t test_vmulhu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+vuint8mf8_t test_vmulhu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                      vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vmulhu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf8_t test_vmulhu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                      vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vmulhu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+vuint8mf4_t test_vmulhu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                      vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vmulhu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf4_t test_vmulhu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                      vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vmulhu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+vuint8mf2_t test_vmulhu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                      vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vmulhu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf2_t test_vmulhu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                      vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m1_t test_vmulhu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+vuint8m1_t test_vmulhu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                    vuint8m1_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vmulhu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+vuint8m1_t test_vmulhu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m2_t test_vmulhu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+vuint8m2_t test_vmulhu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                    vuint8m2_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m2_t test_vmulhu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+vuint8m2_t test_vmulhu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m4_t test_vmulhu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+vuint8m4_t test_vmulhu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                    vuint8m4_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m4_t test_vmulhu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+vuint8m4_t test_vmulhu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m8_t test_vmulhu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+vuint8m8_t test_vmulhu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                    vuint8m8_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m8_t test_vmulhu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+vuint8m8_t test_vmulhu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                    uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vmulhu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+vuint16mf4_t test_vmulhu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                        vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                        size_t vl) {
   return __riscv_vmulhu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf4_t test_vmulhu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
+vuint16mf4_t test_vmulhu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                        vuint16mf4_t vs2, uint16_t rs1,
+                                        size_t vl) {
   return __riscv_vmulhu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16mf2_t test_vmulhu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+vuint16mf2_t test_vmulhu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                        vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                        size_t vl) {
   return __riscv_vmulhu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16mf2_t test_vmulhu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
+vuint16mf2_t test_vmulhu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                        vuint16mf2_t vs2, uint16_t rs1,
+                                        size_t vl) {
   return __riscv_vmulhu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m1_t test_vmulhu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+vuint16m1_t test_vmulhu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                      vuint16m1_t vs2, vuint16m1_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m1_t test_vmulhu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
+vuint16m1_t test_vmulhu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                      vuint16m1_t vs2, uint16_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m2_t test_vmulhu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+vuint16m2_t test_vmulhu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                      vuint16m2_t vs2, vuint16m2_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m2_t test_vmulhu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
+vuint16m2_t test_vmulhu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                      vuint16m2_t vs2, uint16_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m4_t test_vmulhu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+vuint16m4_t test_vmulhu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                      vuint16m4_t vs2, vuint16m4_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m4_t test_vmulhu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
+vuint16m4_t test_vmulhu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                      vuint16m4_t vs2, uint16_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16m8_t test_vmulhu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+vuint16m8_t test_vmulhu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                      vuint16m8_t vs2, vuint16m8_t vs1,
+                                      size_t vl) {
  return __riscv_vmulhu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint16m8_t test_vmulhu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
+vuint16m8_t test_vmulhu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                      vuint16m8_t vs2, uint16_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32mf2_t test_vmulhu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+vuint32mf2_t test_vmulhu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                        size_t vl) {
   return __riscv_vmulhu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32mf2_t test_vmulhu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
+vuint32mf2_t test_vmulhu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint32mf2_t vs2, uint32_t rs1,
+                                        size_t vl) {
   return __riscv_vmulhu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m1_t test_vmulhu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+vuint32m1_t test_vmulhu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint32m1_t vs2, vuint32m1_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m1_t test_vmulhu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
+vuint32m1_t test_vmulhu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint32m1_t vs2, uint32_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m2_t test_vmulhu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+vuint32m2_t test_vmulhu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint32m2_t vs2, vuint32m2_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m2_t test_vmulhu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
+vuint32m2_t test_vmulhu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint32m2_t vs2, uint32_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m4_t test_vmulhu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+vuint32m4_t test_vmulhu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint32m4_t vs2, vuint32m4_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m4_t test_vmulhu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
+vuint32m4_t test_vmulhu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint32m4_t vs2, uint32_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint32m8_t test_vmulhu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+vuint32m8_t test_vmulhu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                      vuint32m8_t vs2, vuint32m8_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m8_t test_vmulhu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
+vuint32m8_t test_vmulhu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                      vuint32m8_t vs2, uint32_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m1_t test_vmulhu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+vuint64m1_t test_vmulhu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint64m1_t vs2, vuint64m1_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m1_t test_vmulhu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+vuint64m1_t test_vmulhu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint64m1_t vs2, uint64_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m2_t test_vmulhu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+vuint64m2_t test_vmulhu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint64m2_t vs2, vuint64m2_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m2_t test_vmulhu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+vuint64m2_t test_vmulhu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint64m2_t vs2, uint64_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m4_t test_vmulhu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+vuint64m4_t test_vmulhu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint64m4_t vs2, vuint64m4_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m4_t test_vmulhu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+vuint64m4_t test_vmulhu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint64m4_t vs2, uint64_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint64m8_t test_vmulhu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+vuint64m8_t test_vmulhu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd,
+                                      vuint64m8_t vs2, vuint64m8_t vs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl);
 }
 
-vuint64m8_t test_vmulhu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+vuint64m8_t test_vmulhu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd,
+                                      vuint64m8_t vs2, uint64_t rs1,
+                                      size_t vl) {
   return __riscv_vmulhu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf8_t test_vmulhu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+vuint8mf8_t test_vmulhu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                    size_t vl) {
   return __riscv_vmulhu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf8_t test_vmulhu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf8_t test_vmulhu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf4_t test_vmulhu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+vuint8mf4_t test_vmulhu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                    size_t vl) {
   return __riscv_vmulhu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf4_t test_vmulhu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf4_t test_vmulhu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8mf2_t test_vmulhu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+vuint8mf2_t test_vmulhu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+                                    vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                    size_t vl) {
  return __riscv_vmulhu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8mf2_t test_vmulhu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf2_t test_vmulhu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+                                    vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m1_t test_vmulhu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+vuint8m1_t test_vmulhu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                  vuint8m1_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m1_t test_vmulhu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
+vuint8m1_t test_vmulhu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                  uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8m1_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m2_t test_vmulhu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+vuint8m2_t test_vmulhu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                  vuint8m2_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8m2_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m2_t test_vmulhu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
+vuint8m2_t test_vmulhu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                  uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8m2_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m4_t test_vmulhu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+vuint8m4_t test_vmulhu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                  vuint8m4_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m4_t test_vmulhu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
+vuint8m4_t test_vmulhu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                  uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8m4_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint8m8_t test_vmulhu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+vuint8m8_t test_vmulhu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                  vuint8m8_t vs1, size_t vl) {
   return __riscv_vmulhu_vv_u8m8_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint8m8_t test_vmulhu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
+vuint8m8_t test_vmulhu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                  uint8_t rs1, size_t vl) {
   return __riscv_vmulhu_vx_u8m8_mu(vm, vd, vs2, rs1, vl);
 }
 
-vuint16mf4_t test_vmulhu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
+vuint16mf4_t test_vmulhu_vv_u16mf4_mu(vbool64_t
vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vmulhu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vmulhu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmulhu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vmulhu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vmulhu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vmulhu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vmulhu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vmulhu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vmulhu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vmulhu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vmulhu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vmulhu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmulhu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vmulhu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vmulhu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vmulhu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vmulhu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmulhu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vmulhu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vmulhu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vmulhu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vmulhu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmulhu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vmulhu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vmulhu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vmulhu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vmulhu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vmulhu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vmulhu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vmulhu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { 
return __riscv_vmulhu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vmulhu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vmulhu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vmulhu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vmulhu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vmulhu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vmulhu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vmulhu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmulhu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vmulhu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vmulhu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vmulhu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vmulhu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmulhu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vmulhu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vmulhu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vmulhu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vmulhu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmulhu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vmulhu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vmulhu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vmulhu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vmulhu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vmulhu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vmulhu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vmulhu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vmulhu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vmulhu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmulhu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vmulhu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vmulhu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vmulhu_vx_u64m2_mu(vbool32_t 
vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vmulhu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmulhu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vmulhu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vmulhu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vmulhu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vmulhu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmulhu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vmulhu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vmulhu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vmulhu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vmulhu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vmulhu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vmulhu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vmv.c b/auto-generated/policy_funcs/llvm-api-tests/vmv.c index c5ef5a6d2..22cfb2bc5 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vmv.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vmv.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -238,7 +238,8 @@ vuint8m8_t test_vmv_v_x_u8m8_tu(vuint8m8_t vd, uint8_t rs1, size_t vl) { return __riscv_vmv_v_x_u8m8_tu(vd, rs1, vl); } -vuint16mf4_t test_vmv_v_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vmv_v_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, + size_t vl) { return __riscv_vmv_v_v_u16mf4_tu(vd, vs1, vl); } @@ -246,7 +247,8 @@ vuint16mf4_t test_vmv_v_x_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, size_t vl) { return __riscv_vmv_v_x_u16mf4_tu(vd, rs1, vl); } -vuint16mf2_t test_vmv_v_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vmv_v_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, + size_t vl) { return __riscv_vmv_v_v_u16mf2_tu(vd, vs1, vl); } @@ -286,7 +288,8 @@ vuint16m8_t test_vmv_v_x_u16m8_tu(vuint16m8_t vd, uint16_t rs1, size_t vl) { return __riscv_vmv_v_x_u16m8_tu(vd, rs1, vl); } -vuint32mf2_t test_vmv_v_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vmv_v_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, + size_t vl) { return __riscv_vmv_v_v_u32mf2_tu(vd, vs1, vl); } @@ -358,63 +361,78 @@ vuint64m8_t test_vmv_v_x_u64m8_tu(vuint64m8_t vd, uint64_t rs1, size_t vl) { return __riscv_vmv_v_x_u64m8_tu(vd, rs1, vl); } -vfloat16mf4_t test_vmv_v_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vmv_v_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs1, + size_t vl) { return __riscv_vmv_v_v_f16mf4_tu(vd, vs1, vl); } -vfloat16mf2_t test_vmv_v_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, size_t vl) { 
+vfloat16mf2_t test_vmv_v_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs1, + size_t vl) { return __riscv_vmv_v_v_f16mf2_tu(vd, vs1, vl); } -vfloat16m1_t test_vmv_v_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, size_t vl) { +vfloat16m1_t test_vmv_v_v_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs1, + size_t vl) { return __riscv_vmv_v_v_f16m1_tu(vd, vs1, vl); } -vfloat16m2_t test_vmv_v_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, size_t vl) { +vfloat16m2_t test_vmv_v_v_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs1, + size_t vl) { return __riscv_vmv_v_v_f16m2_tu(vd, vs1, vl); } -vfloat16m4_t test_vmv_v_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, size_t vl) { +vfloat16m4_t test_vmv_v_v_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs1, + size_t vl) { return __riscv_vmv_v_v_f16m4_tu(vd, vs1, vl); } -vfloat16m8_t test_vmv_v_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, size_t vl) { +vfloat16m8_t test_vmv_v_v_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs1, + size_t vl) { return __riscv_vmv_v_v_f16m8_tu(vd, vs1, vl); } -vfloat32mf2_t test_vmv_v_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vmv_v_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs1, + size_t vl) { return __riscv_vmv_v_v_f32mf2_tu(vd, vs1, vl); } -vfloat32m1_t test_vmv_v_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, size_t vl) { +vfloat32m1_t test_vmv_v_v_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs1, + size_t vl) { return __riscv_vmv_v_v_f32m1_tu(vd, vs1, vl); } -vfloat32m2_t test_vmv_v_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, size_t vl) { +vfloat32m2_t test_vmv_v_v_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs1, + size_t vl) { return __riscv_vmv_v_v_f32m2_tu(vd, vs1, vl); } -vfloat32m4_t test_vmv_v_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, size_t vl) { +vfloat32m4_t test_vmv_v_v_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs1, + size_t vl) { return __riscv_vmv_v_v_f32m4_tu(vd, vs1, vl); } -vfloat32m8_t test_vmv_v_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, size_t vl) { +vfloat32m8_t test_vmv_v_v_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs1, + size_t vl) { return __riscv_vmv_v_v_f32m8_tu(vd, vs1, vl); } -vfloat64m1_t test_vmv_v_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, size_t vl) { +vfloat64m1_t test_vmv_v_v_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs1, + size_t vl) { return __riscv_vmv_v_v_f64m1_tu(vd, vs1, vl); } -vfloat64m2_t test_vmv_v_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, size_t vl) { +vfloat64m2_t test_vmv_v_v_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs1, + size_t vl) { return __riscv_vmv_v_v_f64m2_tu(vd, vs1, vl); } -vfloat64m4_t test_vmv_v_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, size_t vl) { +vfloat64m4_t test_vmv_v_v_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs1, + size_t vl) { return __riscv_vmv_v_v_f64m4_tu(vd, vs1, vl); } -vfloat64m8_t test_vmv_v_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, size_t vl) { +vfloat64m8_t test_vmv_v_v_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs1, + size_t vl) { return __riscv_vmv_v_v_f64m8_tu(vd, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vnclip.c b/auto-generated/policy_funcs/llvm-api-tests/vnclip.c index 531e73482..6073448dc 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vnclip.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vnclip.c @@ -5,482 +5,619 @@ #include <riscv_vector.h> -vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vnclip_wv_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vnclip_wv_i8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); }
-vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vnclip_wx_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vnclip_wv_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vnclip_wv_i8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vnclip_wx_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vnclip_wv_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vnclip_wv_i8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vnclip_wx_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vnclip_wv_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vnclip_wv_i8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vnclip_wx_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vnclip_wv_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vnclip_wv_i8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vnclip_wx_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vnclip_wv_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vnclip_wv_i8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vnclip_wx_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vnclip_wv_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vnclip_wv_i16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vnclip_wx_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wv_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t 
test_vnclip_wv_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vnclip_wv_i16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vnclip_wx_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vnclip_wv_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vnclip_wv_i16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vnclip_wx_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vnclip_wv_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vnclip_wv_i16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vnclip_wx_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vnclip_wv_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vnclip_wv_i16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vnclip_wx_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vnclip_wv_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vnclip_wv_i32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vnclip_wx_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vnclip_wv_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vnclip_wv_i32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vnclip_wx_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vnclip_wv_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vnclip_wv_i32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vnclip_wx_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, size_t rs1, + size_t vl) { return 
__riscv_vnclip_wx_i32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vnclip_wv_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vnclip_wv_i32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vnclip_wx_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclip_wx_i32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vnclip_wv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vnclip_wv_i8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vnclip_wx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vnclip_wv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vnclip_wv_i8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vnclip_wx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vnclip_wv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vnclip_wv_i8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vnclip_wx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vnclip_wv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vnclip_wv_i8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vnclip_wx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vnclip_wv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vnclip_wv_i8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vnclip_wx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); 
} -vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vnclip_wv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vnclip_wv_i8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vnclip_wx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vnclip_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vnclip_wv_i16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vnclip_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vnclip_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vnclip_wv_i16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vnclip_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vnclip_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vnclip_wv_i16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vnclip_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vnclip_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vnclip_wv_i16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vnclip_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vnclip_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vnclip_wv_i16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vnclip_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return 
__riscv_vnclip_wx_i16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vnclip_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vnclip_wv_i32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vnclip_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vnclip_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vnclip_wv_i32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vnclip_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vnclip_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vnclip_wv_i32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vnclip_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vnclip_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vnclip_wv_i32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vnclip_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vnclip_wv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vnclip_wv_i8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vnclip_wx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vnclip_wv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vnclip_wv_i8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vnclip_wx_i8mf4_tumu(vbool32_t 
vm, vint8mf4_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vnclip_wv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vnclip_wv_i8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vnclip_wx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vnclip_wv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vnclip_wv_i8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vnclip_wx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vnclip_wv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vnclip_wv_i8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vnclip_wx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vnclip_wv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vnclip_wv_i8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vnclip_wx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vnclip_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vnclip_wv_i16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vnclip_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vnclip_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vnclip_wv_i16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { 
+vint16mf2_t test_vnclip_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vnclip_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vnclip_wv_i16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vnclip_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vnclip_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vnclip_wv_i16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vnclip_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vnclip_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vnclip_wv_i16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vnclip_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vnclip_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vnclip_wv_i32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vnclip_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vnclip_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vnclip_wv_i32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vnclip_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vnclip_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vnclip_wv_i32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t 
test_vnclip_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vnclip_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vnclip_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vnclip_wv_i32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vnclip_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vnclip_wv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vnclip_wv_i8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vnclip_wx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vnclip_wv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vnclip_wv_i8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vnclip_wx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vnclip_wv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vnclip_wv_i8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vnclip_wx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vnclip_wv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vnclip_wv_i8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vnclip_wx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vnclip_wv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vnclip_wv_i8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t 
test_vnclip_wx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vnclip_wx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vnclip_wv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vnclip_wv_i8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vnclip_wx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vnclip_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vnclip_wv_i16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vnclip_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vnclip_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vnclip_wv_i16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vnclip_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vnclip_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vnclip_wv_i16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vnclip_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vnclip_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vnclip_wv_i16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vnclip_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vnclip_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vnclip_wv_i16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } 
-vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vnclip_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vnclip_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vnclip_wv_i32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vnclip_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnclip_wx_i32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vnclip_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vnclip_wv_i32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vnclip_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vnclip_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vnclip_wv_i32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vnclip_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vnclip_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vnclip_wv_i32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vnclip_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vnclip_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclip_wx_i32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vnclipu.c b/auto-generated/policy_funcs/llvm-api-tests/vnclipu.c index 037f6d52e..539781050 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vnclipu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vnclipu.c @@ -5,482 +5,650 @@ #include <riscv_vector.h> -vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vnclipu_wv_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vnclipu_wv_u8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vnclipu_wx_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t
test_vnclipu_wv_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vnclipu_wv_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vnclipu_wv_u8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vnclipu_wx_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vnclipu_wv_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vnclipu_wv_u8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vnclipu_wx_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vnclipu_wv_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vnclipu_wv_u8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vnclipu_wx_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclipu_wx_u8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vnclipu_wv_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vnclipu_wv_u8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vnclipu_wx_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclipu_wx_u8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vnclipu_wv_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vnclipu_wv_u8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vnclipu_wx_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclipu_wx_u8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vnclipu_wv_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vnclipu_wv_u16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vnclipu_wx_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vnclipu_wv_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vnclipu_wv_u16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t 
test_vnclipu_wx_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vnclipu_wx_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vnclipu_wv_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vnclipu_wv_u16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vnclipu_wx_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vnclipu_wv_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vnclipu_wv_u16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vnclipu_wx_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vnclipu_wv_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vnclipu_wv_u16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vnclipu_wx_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vnclipu_wv_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vnclipu_wv_u32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vnclipu_wx_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vnclipu_wv_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vnclipu_wv_u32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vnclipu_wx_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vnclipu_wv_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vnclipu_wv_u32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vnclipu_wx_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } 
-vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vnclipu_wv_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vnclipu_wv_u32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vnclipu_wx_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vnclipu_wv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vnclipu_wx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vnclipu_wv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vnclipu_wx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vnclipu_wv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vnclipu_wx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vnclipu_wv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vnclipu_wv_u8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vnclipu_wx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vnclipu_wv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vnclipu_wv_u8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vnclipu_wx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { return 
__riscv_vnclipu_wx_u8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vnclipu_wv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vnclipu_wv_u8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vnclipu_wx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vnclipu_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vnclipu_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclipu_wx_u16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vnclipu_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vnclipu_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclipu_wx_u16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vnclipu_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vnclipu_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vnclipu_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vnclipu_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vnclipu_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t vm, vuint16m4_t 
vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vnclipu_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vnclipu_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vnclipu_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclipu_wx_u32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vnclipu_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vnclipu_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vnclipu_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vnclipu_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vnclipu_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vnclipu_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vnclipu_wv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vnclipu_wx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclipu_wx_u8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vnclipu_wv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + 
vuint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vnclipu_wx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclipu_wx_u8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vnclipu_wv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vnclipu_wx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vnclipu_wv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint16m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vnclipu_wx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vnclipu_wv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint16m4_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vnclipu_wx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vnclipu_wv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint16m8_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vnclipu_wx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vnclipu_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vnclipu_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclipu_wx_u16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t 
test_vnclipu_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vnclipu_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vnclipu_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclipu_wx_u16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vnclipu_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vnclipu_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vnclipu_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vnclipu_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vnclipu_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vnclipu_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vnclipu_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vnclipu_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclipu_wx_u32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vnclipu_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, 
size_t vl) { +vuint32m1_t test_vnclipu_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vnclipu_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vnclipu_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vnclipu_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vnclipu_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vnclipu_wv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vnclipu_wx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vnclipu_wv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vnclipu_wx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vnclipu_wv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vnclipu_wx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vnclipu_wv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vnclipu_wv_u8m1_mu(vm, 
vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vnclipu_wx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vnclipu_wv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vnclipu_wv_u8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vnclipu_wx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vnclipu_wv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vnclipu_wv_u8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vnclipu_wx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vnclipu_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vnclipu_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnclipu_wx_u16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vnclipu_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vnclipu_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vnclipu_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vnclipu_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vnclipu_wv_u16m2_mu(vbool8_t vm, 
vuint16m2_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vnclipu_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vnclipu_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vnclipu_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vnclipu_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vnclipu_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vnclipu_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vnclipu_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vnclipu_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vnclipu_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vnclipu_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vnclipu_wv_u32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vnclipu_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vnclipu_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vnclipu_wx_u32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } diff --git 
a/auto-generated/policy_funcs/llvm-api-tests/vncvt.c b/auto-generated/policy_funcs/llvm-api-tests/vncvt.c index f2e94c366..fcaa4fa59 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vncvt.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vncvt.c @@ -5,11 +5,13 @@ #include <riscv_vector.h> -vint8mf8_t test_vncvt_x_x_w_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, size_t vl) { +vint8mf8_t test_vncvt_x_x_w_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i8mf8_tu(vd, vs2, vl); } -vint8mf4_t test_vncvt_x_x_w_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, size_t vl) { +vint8mf4_t test_vncvt_x_x_w_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i8mf4_tu(vd, vs2, vl); } @@ -29,15 +31,18 @@ vint8m4_t test_vncvt_x_x_w_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i8m4_tu(vd, vs2, vl); } -vuint8mf8_t test_vncvt_x_x_w_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vncvt_x_x_w_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u8mf8_tu(vd, vs2, vl); } -vuint8mf4_t test_vncvt_x_x_w_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vncvt_x_x_w_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u8mf4_tu(vd, vs2, vl); } -vuint8mf2_t test_vncvt_x_x_w_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, size_t vl) { +vuint8mf2_t test_vncvt_x_x_w_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u8mf2_tu(vd, vs2, vl); } @@ -53,11 +58,13 @@ vuint8m4_t test_vncvt_x_x_w_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8m4_tu(vd, vs2, vl); } -vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, size_t vl) { +vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i16mf4_tu(vd, vs2, vl); } -vint16mf2_t test_vncvt_x_x_w_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, size_t vl) { +vint16mf2_t test_vncvt_x_x_w_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i16mf2_tu(vd, vs2, vl); } @@ -73,27 +80,33 @@ vint16m4_t test_vncvt_x_x_w_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16m4_tu(vd, vs2, vl); } -vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vncvt_x_x_w_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, size_t vl) { +vuint16mf2_t test_vncvt_x_x_w_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vncvt_x_x_w_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, size_t vl) { +vuint16m1_t test_vncvt_x_x_w_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vncvt_x_x_w_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, size_t vl) { +vuint16m2_t test_vncvt_x_x_w_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vncvt_x_x_w_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, size_t vl) { +vuint16m4_t test_vncvt_x_x_w_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u16m4_tu(vd, vs2, vl); } -vint32mf2_t test_vncvt_x_x_w_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, size_t vl) { +vint32mf2_t
test_vncvt_x_x_w_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i32mf2_tu(vd, vs2, vl); } @@ -109,378 +122,472 @@ vint32m4_t test_vncvt_x_x_w_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i32m4_tu(vd, vs2, vl); } -vuint32mf2_t test_vncvt_x_x_w_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, size_t vl) { +vuint32mf2_t test_vncvt_x_x_w_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vncvt_x_x_w_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, size_t vl) { +vuint32m1_t test_vncvt_x_x_w_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vncvt_x_x_w_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, size_t vl) { +vuint32m2_t test_vncvt_x_x_w_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vncvt_x_x_w_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, size_t vl) { +vuint32m4_t test_vncvt_x_x_w_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u32m4_tu(vd, vs2, vl); } -vint8mf8_t test_vncvt_x_x_w_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t vl) { +vint8mf8_t test_vncvt_x_x_w_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i8mf8_tum(vm, vd, vs2, vl); } -vint8mf4_t test_vncvt_x_x_w_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t vl) { +vint8mf4_t test_vncvt_x_x_w_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i8mf4_tum(vm, vd, vs2, vl); } -vint8mf2_t test_vncvt_x_x_w_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t vl) { +vint8mf2_t test_vncvt_x_x_w_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i8mf2_tum(vm, vd, vs2, vl); } -vint8m1_t test_vncvt_x_x_w_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t vl) { +vint8m1_t test_vncvt_x_x_w_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i8m1_tum(vm, vd, vs2, vl); } -vint8m2_t test_vncvt_x_x_w_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t vl) { +vint8m2_t test_vncvt_x_x_w_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i8m2_tum(vm, vd, vs2, vl); } -vint8m4_t test_vncvt_x_x_w_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t vl) { +vint8m4_t test_vncvt_x_x_w_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i8m4_tum(vm, vd, vs2, vl); } -vuint8mf8_t test_vncvt_x_x_w_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vncvt_x_x_w_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8mf8_tum(vm, vd, vs2, vl); } -vuint8mf4_t test_vncvt_x_x_w_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vncvt_x_x_w_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8mf4_tum(vm, vd, vs2, vl); } -vuint8mf2_t test_vncvt_x_x_w_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t vl) { +vuint8mf2_t test_vncvt_x_x_w_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8mf2_tum(vm, vd, vs2, vl); } -vuint8m1_t test_vncvt_x_x_w_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t 
vl) { +vuint8m1_t test_vncvt_x_x_w_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8m1_tum(vm, vd, vs2, vl); } -vuint8m2_t test_vncvt_x_x_w_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t vl) { +vuint8m2_t test_vncvt_x_x_w_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8m2_tum(vm, vd, vs2, vl); } -vuint8m4_t test_vncvt_x_x_w_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t vl) { +vuint8m4_t test_vncvt_x_x_w_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vuint16m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8m4_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vncvt_x_x_w_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t vl) { +vint16mf4_t test_vncvt_x_x_w_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16mf4_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vncvt_x_x_w_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t vl) { +vint16mf2_t test_vncvt_x_x_w_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16mf2_tum(vm, vd, vs2, vl); } -vint16m1_t test_vncvt_x_x_w_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t vl) { +vint16m1_t test_vncvt_x_x_w_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16m1_tum(vm, vd, vs2, vl); } -vint16m2_t test_vncvt_x_x_w_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t vl) { +vint16m2_t test_vncvt_x_x_w_i16m2_tum(vbool8_t vm, vint16m2_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16m2_tum(vm, vd, vs2, vl); } -vint16m4_t test_vncvt_x_x_w_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t vl) { +vint16m4_t test_vncvt_x_x_w_i16m4_tum(vbool4_t vm, vint16m4_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16m4_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vncvt_x_x_w_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vncvt_x_x_w_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t vl) { +vuint16mf2_t test_vncvt_x_x_w_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vncvt_x_x_w_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t vl) { +vuint16m1_t test_vncvt_x_x_w_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vncvt_x_x_w_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t vl) { +vuint16m2_t test_vncvt_x_x_w_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vncvt_x_x_w_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t vl) { +vuint16m4_t test_vncvt_x_x_w_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16m4_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vncvt_x_x_w_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t vl) { +vint32mf2_t test_vncvt_x_x_w_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vncvt_x_x_w_i32m1_tum(vbool32_t 
vm, vint32m1_t vd, vint64m2_t vs2, size_t vl) { +vint32m1_t test_vncvt_x_x_w_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vncvt_x_x_w_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t vl) { +vint32m2_t test_vncvt_x_x_w_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vncvt_x_x_w_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t vl) { +vint32m4_t test_vncvt_x_x_w_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i32m4_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vncvt_x_x_w_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t vl) { +vuint32mf2_t test_vncvt_x_x_w_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vncvt_x_x_w_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t vl) { +vuint32m1_t test_vncvt_x_x_w_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vncvt_x_x_w_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t vl) { +vuint32m2_t test_vncvt_x_x_w_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vncvt_x_x_w_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t vl) { +vuint32m4_t test_vncvt_x_x_w_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u32m4_tum(vm, vd, vs2, vl); } -vint8mf8_t test_vncvt_x_x_w_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t vl) { +vint8mf8_t test_vncvt_x_x_w_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i8mf8_tumu(vm, vd, vs2, vl); } -vint8mf4_t test_vncvt_x_x_w_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t vl) { +vint8mf4_t test_vncvt_x_x_w_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i8mf4_tumu(vm, vd, vs2, vl); } -vint8mf2_t test_vncvt_x_x_w_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t vl) { +vint8mf2_t test_vncvt_x_x_w_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i8mf2_tumu(vm, vd, vs2, vl); } -vint8m1_t test_vncvt_x_x_w_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t vl) { +vint8m1_t test_vncvt_x_x_w_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i8m1_tumu(vm, vd, vs2, vl); } -vint8m2_t test_vncvt_x_x_w_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t vl) { +vint8m2_t test_vncvt_x_x_w_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i8m2_tumu(vm, vd, vs2, vl); } -vint8m4_t test_vncvt_x_x_w_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t vl) { +vint8m4_t test_vncvt_x_x_w_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i8m4_tumu(vm, vd, vs2, vl); } -vuint8mf8_t test_vncvt_x_x_w_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vncvt_x_x_w_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8mf8_tumu(vm, vd, vs2, vl); } -vuint8mf4_t 
test_vncvt_x_x_w_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vncvt_x_x_w_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8mf4_tumu(vm, vd, vs2, vl); } -vuint8mf2_t test_vncvt_x_x_w_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t vl) { +vuint8mf2_t test_vncvt_x_x_w_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8mf2_tumu(vm, vd, vs2, vl); } -vuint8m1_t test_vncvt_x_x_w_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t vl) { +vuint8m1_t test_vncvt_x_x_w_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8m1_tumu(vm, vd, vs2, vl); } -vuint8m2_t test_vncvt_x_x_w_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t vl) { +vuint8m2_t test_vncvt_x_x_w_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8m2_tumu(vm, vd, vs2, vl); } -vuint8m4_t test_vncvt_x_x_w_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t vl) { +vuint8m4_t test_vncvt_x_x_w_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint16m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8m4_tumu(vm, vd, vs2, vl); } -vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t vl) { +vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16mf4_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vncvt_x_x_w_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t vl) { +vint16mf2_t test_vncvt_x_x_w_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16mf2_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vncvt_x_x_w_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t vl) { +vint16m1_t test_vncvt_x_x_w_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16m1_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vncvt_x_x_w_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t vl) { +vint16m2_t test_vncvt_x_x_w_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16m2_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vncvt_x_x_w_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t vl) { +vint16m4_t test_vncvt_x_x_w_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vint32m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16m4_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vncvt_x_x_w_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t vl) { +vuint16mf2_t test_vncvt_x_x_w_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vncvt_x_x_w_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t vl) { +vuint16m1_t test_vncvt_x_x_w_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vncvt_x_x_w_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t vl) { +vuint16m2_t test_vncvt_x_x_w_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint32m4_t vs2, 
size_t vl) { return __riscv_vncvt_x_x_w_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vncvt_x_x_w_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t vl) { +vuint16m4_t test_vncvt_x_x_w_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16m4_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vncvt_x_x_w_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t vl) { +vint32mf2_t test_vncvt_x_x_w_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vncvt_x_x_w_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t vl) { +vint32m1_t test_vncvt_x_x_w_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vncvt_x_x_w_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t vl) { +vint32m2_t test_vncvt_x_x_w_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vncvt_x_x_w_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t vl) { +vint32m4_t test_vncvt_x_x_w_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vint64m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i32m4_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vncvt_x_x_w_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t vl) { +vuint32mf2_t test_vncvt_x_x_w_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vncvt_x_x_w_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t vl) { +vuint32m1_t test_vncvt_x_x_w_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vncvt_x_x_w_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t vl) { +vuint32m2_t test_vncvt_x_x_w_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vncvt_x_x_w_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t vl) { +vuint32m4_t test_vncvt_x_x_w_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u32m4_tumu(vm, vd, vs2, vl); } -vint8mf8_t test_vncvt_x_x_w_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t vl) { +vint8mf8_t test_vncvt_x_x_w_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i8mf8_mu(vm, vd, vs2, vl); } -vint8mf4_t test_vncvt_x_x_w_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t vl) { +vint8mf4_t test_vncvt_x_x_w_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i8mf4_mu(vm, vd, vs2, vl); } -vint8mf2_t test_vncvt_x_x_w_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t vl) { +vint8mf2_t test_vncvt_x_x_w_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i8mf2_mu(vm, vd, vs2, vl); } -vint8m1_t test_vncvt_x_x_w_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t vl) { +vint8m1_t test_vncvt_x_x_w_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i8m1_mu(vm, vd, vs2, vl); } -vint8m2_t test_vncvt_x_x_w_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t vl) { +vint8m2_t test_vncvt_x_x_w_i8m2_mu(vbool4_t vm, 
vint8m2_t vd, vint16m4_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i8m2_mu(vm, vd, vs2, vl); } -vint8m4_t test_vncvt_x_x_w_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t vl) { +vint8m4_t test_vncvt_x_x_w_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i8m4_mu(vm, vd, vs2, vl); } -vuint8mf8_t test_vncvt_x_x_w_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t vl) { +vuint8mf8_t test_vncvt_x_x_w_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8mf8_mu(vm, vd, vs2, vl); } -vuint8mf4_t test_vncvt_x_x_w_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t vl) { +vuint8mf4_t test_vncvt_x_x_w_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8mf4_mu(vm, vd, vs2, vl); } -vuint8mf2_t test_vncvt_x_x_w_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t vl) { +vuint8mf2_t test_vncvt_x_x_w_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u8mf2_mu(vm, vd, vs2, vl); } -vuint8m1_t test_vncvt_x_x_w_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t vl) { +vuint8m1_t test_vncvt_x_x_w_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u8m1_mu(vm, vd, vs2, vl); } -vuint8m2_t test_vncvt_x_x_w_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t vl) { +vuint8m2_t test_vncvt_x_x_w_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u8m2_mu(vm, vd, vs2, vl); } -vuint8m4_t test_vncvt_x_x_w_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t vl) { +vuint8m4_t test_vncvt_x_x_w_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_u8m4_mu(vm, vd, vs2, vl); } -vint16mf4_t test_vncvt_x_x_w_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t vl) { +vint16mf4_t test_vncvt_x_x_w_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16mf4_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vncvt_x_x_w_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t vl) { +vint16mf2_t test_vncvt_x_x_w_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16mf2_mu(vm, vd, vs2, vl); } -vint16m1_t test_vncvt_x_x_w_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t vl) { +vint16m1_t test_vncvt_x_x_w_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i16m1_mu(vm, vd, vs2, vl); } -vint16m2_t test_vncvt_x_x_w_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t vl) { +vint16m2_t test_vncvt_x_x_w_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i16m2_mu(vm, vd, vs2, vl); } -vint16m4_t test_vncvt_x_x_w_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t vl) { +vint16m4_t test_vncvt_x_x_w_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i16m4_mu(vm, vd, vs2, vl); } -vuint16mf4_t test_vncvt_x_x_w_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t vl) { +vuint16mf4_t test_vncvt_x_x_w_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vncvt_x_x_w_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t vl) { +vuint16mf2_t test_vncvt_x_x_w_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, 
+ vuint32m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vncvt_x_x_w_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t vl) { +vuint16m1_t test_vncvt_x_x_w_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vncvt_x_x_w_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t vl) { +vuint16m2_t test_vncvt_x_x_w_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vncvt_x_x_w_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t vl) { +vuint16m4_t test_vncvt_x_x_w_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u16m4_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vncvt_x_x_w_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t vl) { +vint32mf2_t test_vncvt_x_x_w_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vncvt_x_x_w_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t vl) { +vint32m1_t test_vncvt_x_x_w_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vint64m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vncvt_x_x_w_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t vl) { +vint32m2_t test_vncvt_x_x_w_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vint64m4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_i32m2_mu(vm, vd, vs2, vl); } -vint32m4_t test_vncvt_x_x_w_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t vl) { +vint32m4_t test_vncvt_x_x_w_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + size_t vl) { return __riscv_vncvt_x_x_w_i32m4_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vncvt_x_x_w_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t vl) { +vuint32mf2_t test_vncvt_x_x_w_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vncvt_x_x_w_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t vl) { +vuint32m1_t test_vncvt_x_x_w_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vncvt_x_x_w_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t vl) { +vuint32m2_t test_vncvt_x_x_w_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vncvt_x_x_w_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t vl) { +vuint32m4_t test_vncvt_x_x_w_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, size_t vl) { return __riscv_vncvt_x_x_w_u32m4_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vneg.c b/auto-generated/policy_funcs/llvm-api-tests/vneg.c index 0f5824e56..2a722c3a9 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vneg.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vneg.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s @@ -94,266 +94,332 @@ vint64m8_t 
test_vneg_v_i64m8_tu(vint64m8_t vd, vint64m8_t vs, size_t vl) { return __riscv_vneg_v_i64m8_tu(vd, vs, vl); } -vint8mf8_t test_vneg_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { +vint8mf8_t test_vneg_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, + size_t vl) { return __riscv_vneg_v_i8mf8_tum(vm, vd, vs, vl); } -vint8mf4_t test_vneg_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { +vint8mf4_t test_vneg_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, + size_t vl) { return __riscv_vneg_v_i8mf4_tum(vm, vd, vs, vl); } -vint8mf2_t test_vneg_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { +vint8mf2_t test_vneg_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, + size_t vl) { return __riscv_vneg_v_i8mf2_tum(vm, vd, vs, vl); } -vint8m1_t test_vneg_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { +vint8m1_t test_vneg_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, + size_t vl) { return __riscv_vneg_v_i8m1_tum(vm, vd, vs, vl); } -vint8m2_t test_vneg_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { +vint8m2_t test_vneg_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, + size_t vl) { return __riscv_vneg_v_i8m2_tum(vm, vd, vs, vl); } -vint8m4_t test_vneg_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { +vint8m4_t test_vneg_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, + size_t vl) { return __riscv_vneg_v_i8m4_tum(vm, vd, vs, vl); } -vint8m8_t test_vneg_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { +vint8m8_t test_vneg_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, + size_t vl) { return __riscv_vneg_v_i8m8_tum(vm, vd, vs, vl); } -vint16mf4_t test_vneg_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { +vint16mf4_t test_vneg_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, + size_t vl) { return __riscv_vneg_v_i16mf4_tum(vm, vd, vs, vl); } -vint16mf2_t test_vneg_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { +vint16mf2_t test_vneg_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, + size_t vl) { return __riscv_vneg_v_i16mf2_tum(vm, vd, vs, vl); } -vint16m1_t test_vneg_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { +vint16m1_t test_vneg_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, + size_t vl) { return __riscv_vneg_v_i16m1_tum(vm, vd, vs, vl); } -vint16m2_t test_vneg_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { +vint16m2_t test_vneg_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, + size_t vl) { return __riscv_vneg_v_i16m2_tum(vm, vd, vs, vl); } -vint16m4_t test_vneg_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { +vint16m4_t test_vneg_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, + size_t vl) { return __riscv_vneg_v_i16m4_tum(vm, vd, vs, vl); } -vint16m8_t test_vneg_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { +vint16m8_t test_vneg_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, + size_t vl) { return __riscv_vneg_v_i16m8_tum(vm, vd, vs, vl); } -vint32mf2_t test_vneg_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { +vint32mf2_t test_vneg_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, + size_t vl) { return __riscv_vneg_v_i32mf2_tum(vm, vd, vs, vl); } -vint32m1_t test_vneg_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { +vint32m1_t test_vneg_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, + size_t 
vl) { return __riscv_vneg_v_i32m1_tum(vm, vd, vs, vl); } -vint32m2_t test_vneg_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { +vint32m2_t test_vneg_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, + size_t vl) { return __riscv_vneg_v_i32m2_tum(vm, vd, vs, vl); } -vint32m4_t test_vneg_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { +vint32m4_t test_vneg_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, + size_t vl) { return __riscv_vneg_v_i32m4_tum(vm, vd, vs, vl); } -vint32m8_t test_vneg_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { +vint32m8_t test_vneg_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, + size_t vl) { return __riscv_vneg_v_i32m8_tum(vm, vd, vs, vl); } -vint64m1_t test_vneg_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { +vint64m1_t test_vneg_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, + size_t vl) { return __riscv_vneg_v_i64m1_tum(vm, vd, vs, vl); } -vint64m2_t test_vneg_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { +vint64m2_t test_vneg_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, + size_t vl) { return __riscv_vneg_v_i64m2_tum(vm, vd, vs, vl); } -vint64m4_t test_vneg_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { +vint64m4_t test_vneg_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, + size_t vl) { return __riscv_vneg_v_i64m4_tum(vm, vd, vs, vl); } -vint64m8_t test_vneg_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { +vint64m8_t test_vneg_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, + size_t vl) { return __riscv_vneg_v_i64m8_tum(vm, vd, vs, vl); } -vint8mf8_t test_vneg_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { +vint8mf8_t test_vneg_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, + size_t vl) { return __riscv_vneg_v_i8mf8_tumu(vm, vd, vs, vl); } -vint8mf4_t test_vneg_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { +vint8mf4_t test_vneg_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, + size_t vl) { return __riscv_vneg_v_i8mf4_tumu(vm, vd, vs, vl); } -vint8mf2_t test_vneg_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { +vint8mf2_t test_vneg_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, + size_t vl) { return __riscv_vneg_v_i8mf2_tumu(vm, vd, vs, vl); } -vint8m1_t test_vneg_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { +vint8m1_t test_vneg_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, + size_t vl) { return __riscv_vneg_v_i8m1_tumu(vm, vd, vs, vl); } -vint8m2_t test_vneg_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { +vint8m2_t test_vneg_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, + size_t vl) { return __riscv_vneg_v_i8m2_tumu(vm, vd, vs, vl); } -vint8m4_t test_vneg_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { +vint8m4_t test_vneg_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, + size_t vl) { return __riscv_vneg_v_i8m4_tumu(vm, vd, vs, vl); } -vint8m8_t test_vneg_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { +vint8m8_t test_vneg_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, + size_t vl) { return __riscv_vneg_v_i8m8_tumu(vm, vd, vs, vl); } -vint16mf4_t test_vneg_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { +vint16mf4_t test_vneg_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs, size_t vl) { return __riscv_vneg_v_i16mf4_tumu(vm, vd, 
vs, vl); } -vint16mf2_t test_vneg_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { +vint16mf2_t test_vneg_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs, size_t vl) { return __riscv_vneg_v_i16mf2_tumu(vm, vd, vs, vl); } -vint16m1_t test_vneg_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { +vint16m1_t test_vneg_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, + size_t vl) { return __riscv_vneg_v_i16m1_tumu(vm, vd, vs, vl); } -vint16m2_t test_vneg_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { +vint16m2_t test_vneg_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, + size_t vl) { return __riscv_vneg_v_i16m2_tumu(vm, vd, vs, vl); } -vint16m4_t test_vneg_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { +vint16m4_t test_vneg_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, + size_t vl) { return __riscv_vneg_v_i16m4_tumu(vm, vd, vs, vl); } -vint16m8_t test_vneg_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { +vint16m8_t test_vneg_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, + size_t vl) { return __riscv_vneg_v_i16m8_tumu(vm, vd, vs, vl); } -vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { +vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs, size_t vl) { return __riscv_vneg_v_i32mf2_tumu(vm, vd, vs, vl); } -vint32m1_t test_vneg_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { +vint32m1_t test_vneg_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, + size_t vl) { return __riscv_vneg_v_i32m1_tumu(vm, vd, vs, vl); } -vint32m2_t test_vneg_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { +vint32m2_t test_vneg_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, + size_t vl) { return __riscv_vneg_v_i32m2_tumu(vm, vd, vs, vl); } -vint32m4_t test_vneg_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { +vint32m4_t test_vneg_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, + size_t vl) { return __riscv_vneg_v_i32m4_tumu(vm, vd, vs, vl); } -vint32m8_t test_vneg_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { +vint32m8_t test_vneg_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, + size_t vl) { return __riscv_vneg_v_i32m8_tumu(vm, vd, vs, vl); } -vint64m1_t test_vneg_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { +vint64m1_t test_vneg_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, + size_t vl) { return __riscv_vneg_v_i64m1_tumu(vm, vd, vs, vl); } -vint64m2_t test_vneg_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { +vint64m2_t test_vneg_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, + size_t vl) { return __riscv_vneg_v_i64m2_tumu(vm, vd, vs, vl); } -vint64m4_t test_vneg_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { +vint64m4_t test_vneg_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, + size_t vl) { return __riscv_vneg_v_i64m4_tumu(vm, vd, vs, vl); } -vint64m8_t test_vneg_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { +vint64m8_t test_vneg_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, + size_t vl) { return __riscv_vneg_v_i64m8_tumu(vm, vd, vs, vl); } -vint8mf8_t test_vneg_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { +vint8mf8_t test_vneg_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, + size_t vl) { return 
__riscv_vneg_v_i8mf8_mu(vm, vd, vs, vl); } -vint8mf4_t test_vneg_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { +vint8mf4_t test_vneg_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, + size_t vl) { return __riscv_vneg_v_i8mf4_mu(vm, vd, vs, vl); } -vint8mf2_t test_vneg_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { +vint8mf2_t test_vneg_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, + size_t vl) { return __riscv_vneg_v_i8mf2_mu(vm, vd, vs, vl); } -vint8m1_t test_vneg_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { +vint8m1_t test_vneg_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, + size_t vl) { return __riscv_vneg_v_i8m1_mu(vm, vd, vs, vl); } -vint8m2_t test_vneg_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { +vint8m2_t test_vneg_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, + size_t vl) { return __riscv_vneg_v_i8m2_mu(vm, vd, vs, vl); } -vint8m4_t test_vneg_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { +vint8m4_t test_vneg_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, + size_t vl) { return __riscv_vneg_v_i8m4_mu(vm, vd, vs, vl); } -vint8m8_t test_vneg_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { +vint8m8_t test_vneg_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, + size_t vl) { return __riscv_vneg_v_i8m8_mu(vm, vd, vs, vl); } -vint16mf4_t test_vneg_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { +vint16mf4_t test_vneg_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, + size_t vl) { return __riscv_vneg_v_i16mf4_mu(vm, vd, vs, vl); } -vint16mf2_t test_vneg_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { +vint16mf2_t test_vneg_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, + size_t vl) { return __riscv_vneg_v_i16mf2_mu(vm, vd, vs, vl); } -vint16m1_t test_vneg_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { +vint16m1_t test_vneg_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, + size_t vl) { return __riscv_vneg_v_i16m1_mu(vm, vd, vs, vl); } -vint16m2_t test_vneg_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { +vint16m2_t test_vneg_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, + size_t vl) { return __riscv_vneg_v_i16m2_mu(vm, vd, vs, vl); } -vint16m4_t test_vneg_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { +vint16m4_t test_vneg_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, + size_t vl) { return __riscv_vneg_v_i16m4_mu(vm, vd, vs, vl); } -vint16m8_t test_vneg_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { +vint16m8_t test_vneg_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, + size_t vl) { return __riscv_vneg_v_i16m8_mu(vm, vd, vs, vl); } -vint32mf2_t test_vneg_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { +vint32mf2_t test_vneg_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, + size_t vl) { return __riscv_vneg_v_i32mf2_mu(vm, vd, vs, vl); } -vint32m1_t test_vneg_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { +vint32m1_t test_vneg_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, + size_t vl) { return __riscv_vneg_v_i32m1_mu(vm, vd, vs, vl); } -vint32m2_t test_vneg_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { +vint32m2_t test_vneg_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, + size_t vl) { return __riscv_vneg_v_i32m2_mu(vm, vd, vs, vl); } -vint32m4_t test_vneg_v_i32m4_mu(vbool8_t vm, 
vint32m4_t vd, vint32m4_t vs, size_t vl) { +vint32m4_t test_vneg_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, + size_t vl) { return __riscv_vneg_v_i32m4_mu(vm, vd, vs, vl); } -vint32m8_t test_vneg_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { +vint32m8_t test_vneg_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, + size_t vl) { return __riscv_vneg_v_i32m8_mu(vm, vd, vs, vl); } -vint64m1_t test_vneg_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { +vint64m1_t test_vneg_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, + size_t vl) { return __riscv_vneg_v_i64m1_mu(vm, vd, vs, vl); } -vint64m2_t test_vneg_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { +vint64m2_t test_vneg_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, + size_t vl) { return __riscv_vneg_v_i64m2_mu(vm, vd, vs, vl); } -vint64m4_t test_vneg_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { +vint64m4_t test_vneg_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, + size_t vl) { return __riscv_vneg_v_i64m4_mu(vm, vd, vs, vl); } -vint64m8_t test_vneg_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { +vint64m8_t test_vneg_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, + size_t vl) { return __riscv_vneg_v_i64m8_mu(vm, vd, vs, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vnmsac.c b/auto-generated/policy_funcs/llvm-api-tests/vnmsac.c index 4303fe450..2870097c0 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vnmsac.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vnmsac.c @@ -1,1415 +1,1857 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8_t test_vnmsac_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsac_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8mf8_tu(vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsac_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsac_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i8mf8_tu(vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsac_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsac_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8mf4_tu(vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsac_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsac_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i8mf4_tu(vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsac_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsac_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8mf2_tu(vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsac_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsac_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i8mf2_tu(vd, rs1, vs2, vl); } -vint8m1_t test_vnmsac_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t
test_vnmsac_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i8m1_tu(vd, vs1, vs2, vl); } -vint8m1_t test_vnmsac_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsac_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i8m1_tu(vd, rs1, vs2, vl); } -vint8m2_t test_vnmsac_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsac_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i8m2_tu(vd, vs1, vs2, vl); } -vint8m2_t test_vnmsac_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsac_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i8m2_tu(vd, rs1, vs2, vl); } -vint8m4_t test_vnmsac_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsac_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i8m4_tu(vd, vs1, vs2, vl); } -vint8m4_t test_vnmsac_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsac_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i8m4_tu(vd, rs1, vs2, vl); } -vint8m8_t test_vnmsac_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsac_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i8m8_tu(vd, vs1, vs2, vl); } -vint8m8_t test_vnmsac_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsac_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i8m8_tu(vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsac_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsac_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16mf4_tu(vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsac_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsac_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16mf4_tu(vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsac_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsac_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16mf2_tu(vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsac_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsac_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16mf2_tu(vd, rs1, vs2, vl); } -vint16m1_t test_vnmsac_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsac_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, + vint16m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m1_tu(vd, vs1, vs2, vl); } -vint16m1_t test_vnmsac_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsac_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i16m1_tu(vd, rs1, vs2, vl); } -vint16m2_t test_vnmsac_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsac_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m2_tu(vd, vs1, vs2, vl); } -vint16m2_t 
test_vnmsac_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsac_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i16m2_tu(vd, rs1, vs2, vl); } -vint16m4_t test_vnmsac_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsac_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m4_tu(vd, vs1, vs2, vl); } -vint16m4_t test_vnmsac_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsac_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i16m4_tu(vd, rs1, vs2, vl); } -vint16m8_t test_vnmsac_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsac_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m8_tu(vd, vs1, vs2, vl); } -vint16m8_t test_vnmsac_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsac_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i16m8_tu(vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsac_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsac_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32mf2_tu(vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsac_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsac_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32mf2_tu(vd, rs1, vs2, vl); } -vint32m1_t test_vnmsac_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsac_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, + vint32m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m1_tu(vd, vs1, vs2, vl); } -vint32m1_t test_vnmsac_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsac_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i32m1_tu(vd, rs1, vs2, vl); } -vint32m2_t test_vnmsac_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsac_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, + vint32m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m2_tu(vd, vs1, vs2, vl); } -vint32m2_t test_vnmsac_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsac_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i32m2_tu(vd, rs1, vs2, vl); } -vint32m4_t test_vnmsac_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsac_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m4_tu(vd, vs1, vs2, vl); } -vint32m4_t test_vnmsac_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsac_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i32m4_tu(vd, rs1, vs2, vl); } -vint32m8_t test_vnmsac_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsac_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m8_tu(vd, vs1, vs2, vl); } -vint32m8_t test_vnmsac_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { 
+vint32m8_t test_vnmsac_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i32m8_tu(vd, rs1, vs2, vl); } -vint64m1_t test_vnmsac_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsac_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, + vint64m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m1_tu(vd, vs1, vs2, vl); } -vint64m1_t test_vnmsac_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsac_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i64m1_tu(vd, rs1, vs2, vl); } -vint64m2_t test_vnmsac_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsac_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, + vint64m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m2_tu(vd, vs1, vs2, vl); } -vint64m2_t test_vnmsac_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsac_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i64m2_tu(vd, rs1, vs2, vl); } -vint64m4_t test_vnmsac_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsac_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, + vint64m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m4_tu(vd, vs1, vs2, vl); } -vint64m4_t test_vnmsac_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsac_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i64m4_tu(vd, rs1, vs2, vl); } -vint64m8_t test_vnmsac_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsac_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m8_tu(vd, vs1, vs2, vl); } -vint64m8_t test_vnmsac_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsac_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i64m8_tu(vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsac_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8mf8_tu(vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsac_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8mf8_tu(vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsac_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8mf4_tu(vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsac_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8mf4_tu(vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsac_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8mf2_tu(vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsac_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, + 
vuint8mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8mf2_tu(vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsac_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsac_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8m1_tu(vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsac_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsac_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u8m1_tu(vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsac_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsac_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8m2_tu(vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsac_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsac_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u8m2_tu(vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsac_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsac_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8m4_tu(vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsac_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsac_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u8m4_tu(vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsac_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsac_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8m8_tu(vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsac_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsac_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u8m8_tu(vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsac_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, + vuint16mf4_t vs2, size_t vl) { return __riscv_vnmsac_vv_u16mf4_tu(vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsac_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, + vuint16mf4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16mf4_tu(vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsac_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vnmsac_vv_u16mf2_tu(vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsac_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16mf2_tu(vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsac_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsac_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_u16m1_tu(vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsac_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsac_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, + vuint16m1_t vs2, size_t vl) { return 
__riscv_vnmsac_vx_u16m1_tu(vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsac_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsac_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_u16m2_tu(vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsac_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsac_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m2_tu(vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsac_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsac_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_u16m4_tu(vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsac_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsac_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m4_tu(vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsac_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsac_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u16m8_tu(vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsac_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsac_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m8_tu(vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsac_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vnmsac_vv_u32mf2_tu(vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsac_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32mf2_tu(vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsac_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsac_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_u32m1_tu(vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsac_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsac_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m1_tu(vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsac_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsac_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_u32m2_tu(vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsac_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsac_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m2_tu(vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsac_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsac_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_u32m4_tu(vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsac_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsac_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, + vuint32m4_t 
vs2, size_t vl) { return __riscv_vnmsac_vx_u32m4_tu(vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsac_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsac_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u32m8_tu(vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsac_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsac_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m8_tu(vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsac_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsac_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_u64m1_tu(vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsac_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsac_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m1_tu(vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsac_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsac_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_u64m2_tu(vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsac_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsac_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m2_tu(vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsac_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsac_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_u64m4_tu(vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsac_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsac_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m4_tu(vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsac_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsac_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u64m8_tu(vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsac_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsac_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m8_tu(vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsac_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsac_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8mf8_tum(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsac_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsac_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8mf8_tum(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsac_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsac_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8mf4_tum(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsac_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, 
size_t vl) { +vint8mf4_t test_vnmsac_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8mf4_tum(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsac_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsac_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8mf2_tum(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsac_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsac_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8mf2_tum(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsac_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsac_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m1_tum(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsac_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsac_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8m1_tum(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsac_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsac_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m2_tum(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsac_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsac_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8m2_tum(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsac_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsac_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m4_tum(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsac_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsac_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8m4_tum(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsac_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsac_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, + vint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m8_tum(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsac_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsac_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, + vint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsac_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsac_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i16mf4_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsac_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsac_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16mf4_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsac_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t 
vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsac_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i16mf2_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsac_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsac_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16mf2_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsac_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsac_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, + vint16m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m1_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsac_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsac_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16m1_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsac_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsac_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m2_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsac_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsac_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16m2_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsac_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsac_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m4_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsac_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsac_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16m4_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsac_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsac_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m8_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsac_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsac_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16m8_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsac_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsac_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i32mf2_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsac_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsac_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32mf2_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsac_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsac_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, + vint32m1_t vs2, size_t vl) { return 
__riscv_vnmsac_vv_i32m1_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsac_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsac_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32m1_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsac_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsac_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, + vint32m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m2_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsac_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsac_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32m2_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsac_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsac_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m4_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsac_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsac_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32m4_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsac_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsac_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m8_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsac_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsac_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32m8_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsac_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsac_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, + vint64m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m1_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsac_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsac_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, + vint64m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_i64m1_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsac_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsac_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, + vint64m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m2_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsac_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsac_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, + vint64m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i64m2_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsac_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsac_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, + vint64m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m4_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsac_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t 
test_vnmsac_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, + vint64m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i64m4_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsac_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsac_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m8_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsac_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsac_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i64m8_tum(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsac_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8mf8_tum(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsac_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8mf8_tum(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsac_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8mf4_tum(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsac_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8mf4_tum(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsac_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8mf2_tum(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsac_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8mf2_tum(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsac_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsac_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m1_tum(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsac_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsac_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8m1_tum(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsac_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsac_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m2_tum(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsac_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsac_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8m2_tum(vm, vd, rs1, vs2, vl); } 
-vuint8m4_t test_vnmsac_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsac_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m4_tum(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsac_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsac_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8m4_tum(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsac_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsac_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m8_tum(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsac_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsac_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8m8_tum(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsac_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16mf4_tum(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsac_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u16mf4_tum(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsac_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16mf2_tum(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsac_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + uint16_t rs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u16mf2_tum(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsac_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsac_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16m1_tum(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsac_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsac_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m1_tum(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsac_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsac_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16m2_tum(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsac_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsac_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m2_tum(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsac_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, 
vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsac_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs1, vuint16m4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16m4_tum(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsac_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsac_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m4_tum(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsac_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsac_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs1, vuint16m8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16m8_tum(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsac_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsac_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m8_tum(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsac_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32mf2_tum(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsac_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + uint32_t rs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u32mf2_tum(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsac_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsac_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32m1_tum(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsac_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsac_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m1_tum(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsac_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsac_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32m2_tum(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsac_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsac_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m2_tum(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsac_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsac_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32m4_tum(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsac_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsac_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m4_tum(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsac_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t 
test_vnmsac_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs1, vuint32m8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32m8_tum(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsac_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsac_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m8_tum(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsac_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsac_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs1, vuint64m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u64m1_tum(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsac_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsac_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m1_tum(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsac_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsac_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u64m2_tum(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsac_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsac_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m2_tum(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsac_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsac_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs1, vuint64m4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u64m4_tum(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsac_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsac_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m4_tum(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsac_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsac_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs1, vuint64m8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u64m8_tum(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsac_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsac_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m8_tum(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsac_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsac_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i8mf8_tumu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsac_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsac_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8mf8_tumu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsac_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsac_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs1, vint8mf4_t vs2, + size_t 
vl) { return __riscv_vnmsac_vv_i8mf4_tumu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsac_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsac_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8mf4_tumu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsac_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsac_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs1, vint8mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i8mf2_tumu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsac_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsac_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8mf2_tumu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsac_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsac_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m1_tumu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsac_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsac_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8m1_tumu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsac_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsac_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m2_tumu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsac_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsac_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8m2_tumu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsac_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsac_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m4_tumu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsac_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsac_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8m4_tumu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsac_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsac_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, + vint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m8_tumu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsac_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsac_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, + vint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8m8_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsac_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsac_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i16mf4_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsac_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsac_vx_i16mf4_tumu(vbool64_t vm, 
vint16mf4_t vd, + int16_t rs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsac_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsac_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i16mf2_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsac_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsac_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + int16_t rs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsac_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsac_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs1, vint16m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i16m1_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsac_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsac_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16m1_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsac_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsac_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m2_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsac_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsac_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16m2_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsac_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsac_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m4_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsac_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsac_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16m4_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsac_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsac_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m8_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsac_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsac_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16m8_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsac_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsac_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i32mf2_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsac_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsac_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + int32_t rs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_i32mf2_tumu(vm, vd, 
rs1, vs2, vl); } -vint32m1_t test_vnmsac_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsac_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs1, vint32m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i32m1_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsac_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsac_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32m1_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsac_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsac_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs1, vint32m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i32m2_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsac_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsac_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32m2_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsac_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsac_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m4_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsac_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsac_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32m4_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsac_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsac_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m8_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsac_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsac_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32m8_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsac_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsac_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs1, vint64m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i64m1_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsac_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsac_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, + vint64m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_i64m1_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsac_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsac_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs1, vint64m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i64m2_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsac_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsac_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, + vint64m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i64m2_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsac_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t 
test_vnmsac_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs1, vint64m4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i64m4_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsac_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsac_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, + vint64m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i64m4_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsac_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsac_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m8_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsac_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsac_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i64m8_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsac_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8mf8_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsac_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8mf8_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsac_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8mf4_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsac_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8mf4_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsac_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsac_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsac_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsac_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m1_tumu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsac_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsac_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8m1_tumu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsac_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsac_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return 
__riscv_vnmsac_vv_u8m2_tumu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsac_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsac_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8m2_tumu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsac_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsac_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m4_tumu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsac_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsac_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8m4_tumu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsac_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsac_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m8_tumu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsac_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsac_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8m8_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsac_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16mf4_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsac_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u16mf4_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsac_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsac_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + uint16_t rs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u16mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsac_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsac_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16m1_tumu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsac_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsac_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + uint16_t rs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u16m1_tumu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsac_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsac_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16m2_tumu(vm, vd, vs1, vs2, vl); } 
-vuint16m2_t test_vnmsac_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsac_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsac_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsac_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs1, vuint16m4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16m4_tumu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsac_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsac_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m4_tumu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsac_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsac_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs1, vuint16m8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16m8_tumu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsac_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsac_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m8_tumu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsac_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsac_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + uint32_t rs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u32mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsac_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsac_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32m1_tumu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsac_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsac_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + uint32_t rs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u32m1_tumu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsac_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsac_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32m2_tumu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsac_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsac_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + uint32_t rs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u32m2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsac_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsac_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32m4_tumu(vm, vd, vs1, vs2, vl); } -vuint32m4_t 
test_vnmsac_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsac_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m4_tumu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsac_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsac_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs1, vuint32m8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32m8_tumu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsac_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsac_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m8_tumu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsac_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsac_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs1, vuint64m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u64m1_tumu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsac_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsac_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + uint64_t rs1, vuint64m1_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u64m1_tumu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsac_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsac_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u64m2_tumu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsac_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsac_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + uint64_t rs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u64m2_tumu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsac_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsac_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs1, vuint64m4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u64m4_tumu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsac_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsac_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + uint64_t rs1, vuint64m4_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u64m4_tumu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsac_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsac_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs1, vuint64m8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u64m8_tumu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsac_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsac_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m8_tumu(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsac_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsac_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8mf8_mu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsac_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t 
rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsac_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8mf8_mu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsac_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsac_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8mf4_mu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsac_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsac_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8mf4_mu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsac_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsac_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8mf2_mu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsac_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsac_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8mf2_mu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsac_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsac_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m1_mu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsac_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsac_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8m1_mu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsac_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsac_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m2_mu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsac_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsac_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8m2_mu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsac_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsac_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m4_mu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsac_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsac_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8m4_mu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsac_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsac_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, + vint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i8m8_mu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsac_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsac_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, + vint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i8m8_mu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsac_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { 
+vint16mf4_t test_vnmsac_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i16mf4_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsac_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsac_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsac_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsac_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i16mf2_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsac_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsac_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsac_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsac_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, + vint16m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m1_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsac_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsac_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsac_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsac_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m2_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsac_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsac_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16m2_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsac_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsac_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m4_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsac_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsac_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16m4_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsac_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsac_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i16m8_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsac_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsac_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i16m8_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsac_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsac_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_i32mf2_mu(vm, vd, vs1, vs2, vl); } 
-vint32mf2_t test_vnmsac_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsac_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32mf2_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsac_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsac_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, + vint32m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m1_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsac_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsac_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32m1_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsac_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsac_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, + vint32m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m2_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsac_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsac_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32m2_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsac_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsac_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m4_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsac_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsac_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32m4_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsac_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsac_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i32m8_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsac_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsac_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i32m8_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsac_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsac_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, + vint64m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m1_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsac_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsac_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, + vint64m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_i64m1_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsac_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsac_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, + vint64m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m2_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsac_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsac_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, + vint64m2_t vs2, size_t vl) { 
return __riscv_vnmsac_vx_i64m2_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsac_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsac_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, + vint64m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m4_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsac_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsac_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, + vint64m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_i64m4_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsac_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsac_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_i64m8_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsac_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsac_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_i64m8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsac_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8mf8_mu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsac_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsac_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8mf8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsac_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8mf4_mu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsac_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsac_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8mf4_mu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsac_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u8mf2_mu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsac_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsac_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8mf2_mu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsac_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsac_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m1_mu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsac_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsac_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8m1_mu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsac_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t 
test_vnmsac_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m2_mu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsac_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsac_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8m2_mu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsac_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsac_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m4_mu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsac_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsac_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8m4_mu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsac_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsac_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vv_u8m8_mu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsac_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsac_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u8m8_mu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsac_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16mf4_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsac_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsac_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u16mf4_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsac_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16mf2_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsac_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsac_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + uint16_t rs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u16mf2_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsac_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsac_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16m1_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsac_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsac_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m1_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsac_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsac_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16m2_mu(vm, 
vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsac_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsac_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m2_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsac_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsac_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs1, vuint16m4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16m4_mu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsac_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsac_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m4_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsac_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsac_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs1, vuint16m8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u16m8_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsac_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsac_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u16m8_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsac_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32mf2_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsac_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsac_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + uint32_t rs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vnmsac_vx_u32mf2_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsac_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsac_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32m1_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsac_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsac_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m1_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsac_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsac_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32m2_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsac_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsac_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m2_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsac_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsac_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32m4_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsac_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, 
vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsac_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m4_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsac_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsac_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs1, vuint32m8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u32m8_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsac_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsac_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u32m8_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsac_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsac_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs1, vuint64m1_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u64m1_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsac_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsac_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m1_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsac_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsac_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u64m2_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsac_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsac_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m2_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsac_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsac_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs1, vuint64m4_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u64m4_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsac_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsac_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m4_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsac_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsac_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs1, vuint64m8_t vs2, + size_t vl) { return __riscv_vnmsac_vv_u64m8_mu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsac_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsac_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vnmsac_vx_u64m8_mu(vm, vd, rs1, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vnmsub.c b/auto-generated/policy_funcs/llvm-api-tests/vnmsub.c index 82a661dfd..88ee02668 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vnmsub.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vnmsub.c @@ -1,1415 +1,1857 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: 
-target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vint8mf8_t test_vnmsub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8mf8_tu(vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsub_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsub_vx_i8mf8_tu(vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i8mf8_tu(vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8mf4_tu(vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsub_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsub_vx_i8mf4_tu(vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i8mf4_tu(vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8mf2_tu(vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsub_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsub_vx_i8mf2_tu(vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i8mf2_tu(vd, rs1, vs2, vl); } -vint8m1_t test_vnmsub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i8m1_tu(vd, vs1, vs2, vl); } -vint8m1_t test_vnmsub_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsub_vx_i8m1_tu(vint8m1_t vd, int8_t rs1, vint8m1_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i8m1_tu(vd, rs1, vs2, vl); } -vint8m2_t test_vnmsub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i8m2_tu(vd, vs1, vs2, vl); } -vint8m2_t test_vnmsub_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsub_vx_i8m2_tu(vint8m2_t vd, int8_t rs1, vint8m2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i8m2_tu(vd, rs1, vs2, vl); } -vint8m4_t test_vnmsub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i8m4_tu(vd, vs1, vs2, vl); } -vint8m4_t test_vnmsub_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsub_vx_i8m4_tu(vint8m4_t vd, int8_t rs1, vint8m4_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i8m4_tu(vd, rs1, vs2, vl); } -vint8m8_t test_vnmsub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i8m8_tu(vd, vs1, vs2, vl); } -vint8m8_t test_vnmsub_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsub_vx_i8m8_tu(vint8m8_t vd, int8_t rs1, vint8m8_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i8m8_tu(vd, rs1, vs2, vl); } -vint16mf4_t
test_vnmsub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16mf4_tu(vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsub_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsub_vx_i16mf4_tu(vint16mf4_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16mf4_tu(vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16mf2_tu(vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsub_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsub_vx_i16mf2_tu(vint16mf2_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16mf2_tu(vd, rs1, vs2, vl); } -vint16m1_t test_vnmsub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs1, + vint16m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m1_tu(vd, vs1, vs2, vl); } -vint16m1_t test_vnmsub_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsub_vx_i16m1_tu(vint16m1_t vd, int16_t rs1, vint16m1_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i16m1_tu(vd, rs1, vs2, vl); } -vint16m2_t test_vnmsub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m2_tu(vd, vs1, vs2, vl); } -vint16m2_t test_vnmsub_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsub_vx_i16m2_tu(vint16m2_t vd, int16_t rs1, vint16m2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i16m2_tu(vd, rs1, vs2, vl); } -vint16m4_t test_vnmsub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m4_tu(vd, vs1, vs2, vl); } -vint16m4_t test_vnmsub_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsub_vx_i16m4_tu(vint16m4_t vd, int16_t rs1, vint16m4_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i16m4_tu(vd, rs1, vs2, vl); } -vint16m8_t test_vnmsub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m8_tu(vd, vs1, vs2, vl); } -vint16m8_t test_vnmsub_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsub_vx_i16m8_tu(vint16m8_t vd, int16_t rs1, vint16m8_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i16m8_tu(vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32mf2_tu(vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsub_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsub_vx_i32mf2_tu(vint32mf2_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32mf2_tu(vd, rs1, vs2, vl); } -vint32m1_t test_vnmsub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t 
vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs1, + vint32m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m1_tu(vd, vs1, vs2, vl); } -vint32m1_t test_vnmsub_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsub_vx_i32m1_tu(vint32m1_t vd, int32_t rs1, vint32m1_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i32m1_tu(vd, rs1, vs2, vl); } -vint32m2_t test_vnmsub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs1, + vint32m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m2_tu(vd, vs1, vs2, vl); } -vint32m2_t test_vnmsub_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsub_vx_i32m2_tu(vint32m2_t vd, int32_t rs1, vint32m2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i32m2_tu(vd, rs1, vs2, vl); } -vint32m4_t test_vnmsub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m4_tu(vd, vs1, vs2, vl); } -vint32m4_t test_vnmsub_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsub_vx_i32m4_tu(vint32m4_t vd, int32_t rs1, vint32m4_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i32m4_tu(vd, rs1, vs2, vl); } -vint32m8_t test_vnmsub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m8_tu(vd, vs1, vs2, vl); } -vint32m8_t test_vnmsub_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsub_vx_i32m8_tu(vint32m8_t vd, int32_t rs1, vint32m8_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i32m8_tu(vd, rs1, vs2, vl); } -vint64m1_t test_vnmsub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs1, + vint64m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m1_tu(vd, vs1, vs2, vl); } -vint64m1_t test_vnmsub_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsub_vx_i64m1_tu(vint64m1_t vd, int64_t rs1, vint64m1_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i64m1_tu(vd, rs1, vs2, vl); } -vint64m2_t test_vnmsub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs1, + vint64m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m2_tu(vd, vs1, vs2, vl); } -vint64m2_t test_vnmsub_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsub_vx_i64m2_tu(vint64m2_t vd, int64_t rs1, vint64m2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i64m2_tu(vd, rs1, vs2, vl); } -vint64m4_t test_vnmsub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs1, + vint64m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m4_tu(vd, vs1, vs2, vl); } -vint64m4_t test_vnmsub_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsub_vx_i64m4_tu(vint64m4_t vd, int64_t rs1, vint64m4_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i64m4_tu(vd, rs1, vs2, vl); } -vint64m8_t test_vnmsub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs1, + 
vint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m8_tu(vd, vs1, vs2, vl); } -vint64m8_t test_vnmsub_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsub_vx_i64m8_tu(vint64m8_t vd, int64_t rs1, vint64m8_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i64m8_tu(vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsub_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8mf8_tu(vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsub_vx_u8mf8_tu(vuint8mf8_t vd, uint8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8mf8_tu(vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsub_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8mf4_tu(vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsub_vx_u8mf4_tu(vuint8mf4_t vd, uint8_t rs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8mf4_tu(vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsub_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8mf2_tu(vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsub_vx_u8mf2_tu(vuint8mf2_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8mf2_tu(vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsub_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsub_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8m1_tu(vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsub_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsub_vx_u8m1_tu(vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u8m1_tu(vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsub_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsub_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8m2_tu(vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsub_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsub_vx_u8m2_tu(vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u8m2_tu(vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsub_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsub_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8m4_tu(vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsub_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsub_vx_u8m4_tu(vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u8m4_tu(vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsub_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsub_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8m8_tu(vd, vs1, vs2, vl); } 
-vuint8m8_t test_vnmsub_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsub_vx_u8m8_tu(vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u8m8_tu(vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsub_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs1, + vuint16mf4_t vs2, size_t vl) { return __riscv_vnmsub_vv_u16mf4_tu(vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsub_vx_u16mf4_tu(vuint16mf4_t vd, uint16_t rs1, + vuint16mf4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16mf4_tu(vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsub_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vnmsub_vv_u16mf2_tu(vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsub_vx_u16mf2_tu(vuint16mf2_t vd, uint16_t rs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16mf2_tu(vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsub_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsub_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_u16m1_tu(vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsub_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsub_vx_u16m1_tu(vuint16m1_t vd, uint16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m1_tu(vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsub_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsub_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_u16m2_tu(vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsub_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsub_vx_u16m2_tu(vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m2_tu(vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsub_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsub_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_u16m4_tu(vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsub_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsub_vx_u16m4_tu(vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m4_tu(vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsub_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsub_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u16m8_tu(vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsub_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsub_vx_u16m8_tu(vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m8_tu(vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs1, + vuint32mf2_t vs2, size_t vl) { return 
__riscv_vnmsub_vv_u32mf2_tu(vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsub_vx_u32mf2_tu(vuint32mf2_t vd, uint32_t rs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32mf2_tu(vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsub_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsub_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_u32m1_tu(vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsub_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsub_vx_u32m1_tu(vuint32m1_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m1_tu(vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsub_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsub_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_u32m2_tu(vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsub_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsub_vx_u32m2_tu(vuint32m2_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m2_tu(vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsub_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsub_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_u32m4_tu(vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsub_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsub_vx_u32m4_tu(vuint32m4_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m4_tu(vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsub_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsub_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u32m8_tu(vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsub_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsub_vx_u32m8_tu(vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m8_tu(vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsub_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsub_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_u64m1_tu(vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsub_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsub_vx_u64m1_tu(vuint64m1_t vd, uint64_t rs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m1_tu(vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsub_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsub_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_u64m2_tu(vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsub_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsub_vx_u64m2_tu(vuint64m2_t vd, uint64_t rs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m2_tu(vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsub_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsub_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs1, + vuint64m4_t vs2, size_t 
vl) { return __riscv_vnmsub_vv_u64m4_tu(vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsub_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsub_vx_u64m4_tu(vuint64m4_t vd, uint64_t rs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m4_tu(vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsub_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsub_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u64m8_tu(vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsub_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsub_vx_u64m8_tu(vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m8_tu(vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8mf8_tum(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8mf8_tum(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8mf4_tum(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, int8_t rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8mf4_tum(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8mf2_tum(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8mf2_tum(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m1_tum(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8m1_tum(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m2_tum(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8m2_tum(vm, vd, 
rs1, vs2, vl); } -vint8m4_t test_vnmsub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m4_tum(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8m4_tum(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, + vint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m8_tum(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, int8_t rs1, + vint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i16mf4_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16mf4_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i16mf2_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16mf2_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, + vint16m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m1_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16m1_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m2_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16m2_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, 
vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m4_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16m4_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m8_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int16_t rs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16m8_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i32mf2_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32mf2_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, + vint32m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m1_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32m1_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, + vint32m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m2_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32m2_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m4_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32m4_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m8_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, 
int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int32_t rs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32m8_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, + vint64m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m1_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int64_t rs1, + vint64m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_i64m1_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, + vint64m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m2_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int64_t rs1, + vint64m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i64m2_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, + vint64m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m4_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int64_t rs1, + vint64m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i64m4_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m8_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int64_t rs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i64m8_tum(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsub_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8mf8_tum(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsub_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8mf8_tum(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsub_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8mf4_tum(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsub_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, + vuint8mf4_t vs2, size_t vl) { 
return __riscv_vnmsub_vx_u8mf4_tum(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsub_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8mf2_tum(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsub_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8mf2_tum(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsub_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsub_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m1_tum(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsub_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsub_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8m1_tum(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsub_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsub_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m2_tum(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsub_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsub_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8m2_tum(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsub_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsub_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m4_tum(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsub_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsub_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8m4_tum(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsub_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsub_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m8_tum(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsub_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsub_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8m8_tum(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsub_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16mf4_tum(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsub_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u16mf4_tum(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, 
size_t vl) { +vuint16mf2_t test_vnmsub_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16mf2_tum(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsub_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + uint16_t rs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u16mf2_tum(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsub_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsub_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16m1_tum(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsub_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsub_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m1_tum(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsub_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsub_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16m2_tum(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsub_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsub_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m2_tum(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsub_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsub_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs1, vuint16m4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16m4_tum(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsub_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsub_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m4_tum(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsub_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsub_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs1, vuint16m8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16m8_tum(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m8_tum(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsub_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32mf2_tum(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsub_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + uint32_t rs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u32mf2_tum(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsub_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t 
test_vnmsub_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32m1_tum(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsub_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsub_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m1_tum(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsub_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsub_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32m2_tum(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsub_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsub_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m2_tum(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsub_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsub_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32m4_tum(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsub_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsub_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m4_tum(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsub_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsub_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs1, vuint32m8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32m8_tum(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsub_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsub_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m8_tum(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsub_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsub_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs1, vuint64m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u64m1_tum(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsub_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsub_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m1_tum(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsub_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsub_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u64m2_tum(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsub_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsub_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m2_tum(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsub_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsub_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs1, 
vuint64m4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u64m4_tum(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsub_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsub_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m4_tum(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsub_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsub_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs1, vuint64m8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u64m8_tum(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsub_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsub_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m8_tum(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i8mf8_tumu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8mf8_tumu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs1, vint8mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i8mf4_tumu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8mf4_tumu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs1, vint8mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i8mf2_tumu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8mf2_tumu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m1_tumu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8m1_tumu(vm, vd, rs1, vs2, vl); } -vint8m2_t test_vnmsub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m2_tumu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, 
vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8m2_tumu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m4_tumu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8m4_tumu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, + vint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m8_tumu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, int8_t rs1, + vint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8m8_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i16mf4_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + int16_t rs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i16mf2_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + int16_t rs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs1, vint16m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i16m1_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16m1_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m2_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return 
__riscv_vnmsub_vx_i16m2_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m4_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16m4_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m8_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int16_t rs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16m8_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i32mf2_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + int32_t rs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs1, vint32m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i32m1_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32m1_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs1, vint32m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i32m2_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32m2_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m4_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32m4_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t 
vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m8_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int32_t rs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32m8_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs1, vint64m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i64m1_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int64_t rs1, + vint64m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_i64m1_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs1, vint64m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i64m2_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int64_t rs1, + vint64m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i64m2_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs1, vint64m4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i64m4_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int64_t rs1, + vint64m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i64m4_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m8_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int64_t rs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i64m8_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsub_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8mf8_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsub_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8mf8_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsub_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs1, 
vuint8mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8mf4_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsub_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8mf4_tumu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsub_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsub_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsub_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsub_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m1_tumu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsub_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsub_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8m1_tumu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsub_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsub_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m2_tumu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsub_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsub_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8m2_tumu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsub_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsub_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m4_tumu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsub_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsub_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8m4_tumu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsub_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsub_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m8_tumu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsub_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsub_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8m8_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsub_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16mf4_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vx_u16mf4_tumu(vbool64_t 
vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsub_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u16mf4_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsub_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsub_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + uint16_t rs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u16mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsub_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsub_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16m1_tumu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsub_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsub_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + uint16_t rs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u16m1_tumu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsub_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsub_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16m2_tumu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsub_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsub_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsub_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsub_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs1, vuint16m4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16m4_tumu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsub_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsub_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m4_tumu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsub_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsub_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs1, vuint16m8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16m8_tumu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsub_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsub_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m8_tumu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsub_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vx_u32mf2_tumu(vbool64_t vm, 
vuint32mf2_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsub_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + uint32_t rs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u32mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsub_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsub_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32m1_tumu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsub_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsub_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + uint32_t rs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u32m1_tumu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsub_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsub_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32m2_tumu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsub_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsub_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + uint32_t rs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u32m2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsub_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsub_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32m4_tumu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsub_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsub_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m4_tumu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsub_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsub_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs1, vuint32m8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32m8_tumu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsub_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsub_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m8_tumu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsub_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsub_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs1, vuint64m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u64m1_tumu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsub_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsub_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + uint64_t rs1, vuint64m1_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u64m1_tumu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsub_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsub_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u64m2_tumu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsub_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, 
vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsub_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + uint64_t rs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u64m2_tumu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsub_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsub_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs1, vuint64m4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u64m4_tumu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsub_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsub_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + uint64_t rs1, vuint64m4_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u64m4_tumu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsub_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsub_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs1, vuint64m8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u64m8_tumu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsub_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsub_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m8_tumu(vm, vd, rs1, vs2, vl); } -vint8mf8_t test_vnmsub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8mf8_mu(vm, vd, vs1, vs2, vl); } -vint8mf8_t test_vnmsub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint8mf8_t test_vnmsub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8mf8_mu(vm, vd, rs1, vs2, vl); } -vint8mf4_t test_vnmsub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8mf4_mu(vm, vd, vs1, vs2, vl); } -vint8mf4_t test_vnmsub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint8mf4_t test_vnmsub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, int8_t rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8mf4_mu(vm, vd, rs1, vs2, vl); } -vint8mf2_t test_vnmsub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8mf2_mu(vm, vd, vs1, vs2, vl); } -vint8mf2_t test_vnmsub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint8mf2_t test_vnmsub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8mf2_mu(vm, vd, rs1, vs2, vl); } -vint8m1_t test_vnmsub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m1_mu(vm, vd, vs1, vs2, vl); } -vint8m1_t test_vnmsub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint8m1_t test_vnmsub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8m1_mu(vm, vd, 
rs1, vs2, vl); } -vint8m2_t test_vnmsub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m2_mu(vm, vd, vs1, vs2, vl); } -vint8m2_t test_vnmsub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint8m2_t test_vnmsub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8m2_mu(vm, vd, rs1, vs2, vl); } -vint8m4_t test_vnmsub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m4_mu(vm, vd, vs1, vs2, vl); } -vint8m4_t test_vnmsub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint8m4_t test_vnmsub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8m4_mu(vm, vd, rs1, vs2, vl); } -vint8m8_t test_vnmsub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs1, + vint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i8m8_mu(vm, vd, vs1, vs2, vl); } -vint8m8_t test_vnmsub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, vint8m8_t vs2, size_t vl) { +vint8m8_t test_vnmsub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, int8_t rs1, + vint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i8m8_mu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vnmsub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i16mf4_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vnmsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint16mf4_t test_vnmsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vnmsub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i16mf2_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vnmsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint16mf2_t test_vnmsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vnmsub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs1, + vint16m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m1_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vnmsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint16m1_t test_vnmsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vnmsub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return 
__riscv_vnmsub_vv_i16m2_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vnmsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint16m2_t test_vnmsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16m2_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vnmsub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m4_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vnmsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint16m4_t test_vnmsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16m4_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vnmsub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i16m8_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vnmsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, vint16m8_t vs2, size_t vl) { +vint16m8_t test_vnmsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int16_t rs1, + vint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i16m8_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vnmsub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_i32mf2_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vnmsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint32mf2_t test_vnmsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32mf2_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vnmsub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs1, + vint32m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m1_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vnmsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint32m1_t test_vnmsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32m1_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vnmsub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs1, + vint32m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m2_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vnmsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint32m2_t test_vnmsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32m2_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vnmsub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m4_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vnmsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint32m4_t test_vnmsub_vx_i32m4_mu(vbool8_t vm, 
vint32m4_t vd, int32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32m4_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vnmsub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i32m8_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vnmsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, vint32m8_t vs2, size_t vl) { +vint32m8_t test_vnmsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int32_t rs1, + vint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i32m8_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vnmsub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs1, + vint64m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m1_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vnmsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, vint64m1_t vs2, size_t vl) { +vint64m1_t test_vnmsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int64_t rs1, + vint64m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_i64m1_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vnmsub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs1, + vint64m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m2_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vnmsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, vint64m2_t vs2, size_t vl) { +vint64m2_t test_vnmsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int64_t rs1, + vint64m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_i64m2_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vnmsub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs1, + vint64m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m4_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vnmsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, vint64m4_t vs2, size_t vl) { +vint64m4_t test_vnmsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int64_t rs1, + vint64m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_i64m4_mu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vnmsub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_i64m8_mu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vnmsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, vint64m8_t vs2, size_t vl) { +vint64m8_t test_vnmsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int64_t rs1, + vint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_i64m8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsub_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8mf8_mu(vm, vd, vs1, vs2, vl); } -vuint8mf8_t test_vnmsub_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint8mf8_t test_vnmsub_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, uint8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8mf8_mu(vm, vd, rs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs1, 
vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsub_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8mf4_mu(vm, vd, vs1, vs2, vl); } -vuint8mf4_t test_vnmsub_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint8mf4_t test_vnmsub_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, uint8_t rs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8mf4_mu(vm, vd, rs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsub_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u8mf2_mu(vm, vd, vs1, vs2, vl); } -vuint8mf2_t test_vnmsub_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint8mf2_t test_vnmsub_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8mf2_mu(vm, vd, rs1, vs2, vl); } -vuint8m1_t test_vnmsub_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsub_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m1_mu(vm, vd, vs1, vs2, vl); } -vuint8m1_t test_vnmsub_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint8m1_t test_vnmsub_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8m1_mu(vm, vd, rs1, vs2, vl); } -vuint8m2_t test_vnmsub_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsub_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m2_mu(vm, vd, vs1, vs2, vl); } -vuint8m2_t test_vnmsub_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint8m2_t test_vnmsub_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8m2_mu(vm, vd, rs1, vs2, vl); } -vuint8m4_t test_vnmsub_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsub_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m4_mu(vm, vd, vs1, vs2, vl); } -vuint8m4_t test_vnmsub_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint8m4_t test_vnmsub_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8m4_mu(vm, vd, rs1, vs2, vl); } -vuint8m8_t test_vnmsub_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsub_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vv_u8m8_mu(vm, vd, vs1, vs2, vl); } -vuint8m8_t test_vnmsub_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, vuint8m8_t vs2, size_t vl) { +vuint8m8_t test_vnmsub_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, uint8_t rs1, + vuint8m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u8m8_mu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vnmsub_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsub_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16mf4_mu(vm, vd, vs1, vs2, vl); } 
-vuint16mf4_t test_vnmsub_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint16mf4_t test_vnmsub_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u16mf4_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsub_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16mf2_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vnmsub_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint16mf2_t test_vnmsub_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + uint16_t rs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u16mf2_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vnmsub_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsub_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16m1_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vnmsub_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint16m1_t test_vnmsub_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m1_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vnmsub_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsub_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16m2_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vnmsub_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint16m2_t test_vnmsub_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m2_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vnmsub_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsub_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs1, vuint16m4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16m4_mu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vnmsub_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint16m4_t test_vnmsub_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m4_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vnmsub_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsub_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs1, vuint16m8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u16m8_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vnmsub_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, vuint16m8_t vs2, size_t vl) { +vuint16m8_t test_vnmsub_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint16_t rs1, + vuint16m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u16m8_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsub_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32mf2_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vnmsub_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint32_t 
rs1, vuint32mf2_t vs2, size_t vl) { +vuint32mf2_t test_vnmsub_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + uint32_t rs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vnmsub_vx_u32mf2_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vnmsub_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsub_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32m1_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vnmsub_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint32m1_t test_vnmsub_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m1_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vnmsub_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsub_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32m2_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vnmsub_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint32m2_t test_vnmsub_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m2_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vnmsub_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsub_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32m4_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vnmsub_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint32m4_t test_vnmsub_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m4_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vnmsub_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsub_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs1, vuint32m8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u32m8_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vnmsub_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, vuint32m8_t vs2, size_t vl) { +vuint32m8_t test_vnmsub_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint32_t rs1, + vuint32m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u32m8_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vnmsub_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsub_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs1, vuint64m1_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u64m1_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vnmsub_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, vuint64m1_t vs2, size_t vl) { +vuint64m1_t test_vnmsub_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint64_t rs1, + vuint64m1_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m1_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vnmsub_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsub_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs1, vuint64m2_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u64m2_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vnmsub_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t rs1, vuint64m2_t vs2, size_t vl) { +vuint64m2_t test_vnmsub_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint64_t 
rs1, + vuint64m2_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m2_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vnmsub_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsub_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs1, vuint64m4_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u64m4_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vnmsub_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, vuint64m4_t vs2, size_t vl) { +vuint64m4_t test_vnmsub_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint64_t rs1, + vuint64m4_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m4_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vnmsub_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsub_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs1, vuint64m8_t vs2, + size_t vl) { return __riscv_vnmsub_vv_u64m8_mu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vnmsub_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, vuint64m8_t vs2, size_t vl) { +vuint64m8_t test_vnmsub_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint64_t rs1, + vuint64m8_t vs2, size_t vl) { return __riscv_vnmsub_vx_u64m8_mu(vm, vd, rs1, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vnot.c b/auto-generated/policy_funcs/llvm-api-tests/vnot.c index a8bdd7e0a..15576f509 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vnot.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vnot.c @@ -121,11 +121,13 @@ vuint8m8_t test_vnot_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs, size_t vl) { return __riscv_vnot_v_u8m8_tu(vd, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs, size_t vl) { +vuint16mf4_t test_vnot_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs, + size_t vl) { return __riscv_vnot_v_u16mf4_tu(vd, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs, size_t vl) { +vuint16mf2_t test_vnot_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs, + size_t vl) { return __riscv_vnot_v_u16mf2_tu(vd, vs, vl); } @@ -145,7 +147,8 @@ vuint16m8_t test_vnot_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs, size_t vl) { return __riscv_vnot_v_u16m8_tu(vd, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs, size_t vl) { +vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs, + size_t vl) { return __riscv_vnot_v_u32mf2_tu(vd, vs, vl); } @@ -181,530 +184,662 @@ vuint64m8_t test_vnot_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs, size_t vl) { return __riscv_vnot_v_u64m8_tu(vd, vs, vl); } -vint8mf8_t test_vnot_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { +vint8mf8_t test_vnot_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, + size_t vl) { return __riscv_vnot_v_i8mf8_tum(vm, vd, vs, vl); } -vint8mf4_t test_vnot_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { +vint8mf4_t test_vnot_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, + size_t vl) { return __riscv_vnot_v_i8mf4_tum(vm, vd, vs, vl); } -vint8mf2_t test_vnot_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { +vint8mf2_t test_vnot_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, + size_t vl) { return __riscv_vnot_v_i8mf2_tum(vm, vd, vs, vl); } -vint8m1_t test_vnot_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { +vint8m1_t test_vnot_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, + size_t vl) { return __riscv_vnot_v_i8m1_tum(vm, vd, vs, vl); } -vint8m2_t test_vnot_v_i8m2_tum(vbool4_t vm, 
vint8m2_t vd, vint8m2_t vs, size_t vl) { +vint8m2_t test_vnot_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, + size_t vl) { return __riscv_vnot_v_i8m2_tum(vm, vd, vs, vl); } -vint8m4_t test_vnot_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { +vint8m4_t test_vnot_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, + size_t vl) { return __riscv_vnot_v_i8m4_tum(vm, vd, vs, vl); } -vint8m8_t test_vnot_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { +vint8m8_t test_vnot_v_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, + size_t vl) { return __riscv_vnot_v_i8m8_tum(vm, vd, vs, vl); } -vint16mf4_t test_vnot_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { +vint16mf4_t test_vnot_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, + size_t vl) { return __riscv_vnot_v_i16mf4_tum(vm, vd, vs, vl); } -vint16mf2_t test_vnot_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { +vint16mf2_t test_vnot_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, + size_t vl) { return __riscv_vnot_v_i16mf2_tum(vm, vd, vs, vl); } -vint16m1_t test_vnot_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { +vint16m1_t test_vnot_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, + size_t vl) { return __riscv_vnot_v_i16m1_tum(vm, vd, vs, vl); } -vint16m2_t test_vnot_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { +vint16m2_t test_vnot_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, + size_t vl) { return __riscv_vnot_v_i16m2_tum(vm, vd, vs, vl); } -vint16m4_t test_vnot_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { +vint16m4_t test_vnot_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, + size_t vl) { return __riscv_vnot_v_i16m4_tum(vm, vd, vs, vl); } -vint16m8_t test_vnot_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { +vint16m8_t test_vnot_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, + size_t vl) { return __riscv_vnot_v_i16m8_tum(vm, vd, vs, vl); } -vint32mf2_t test_vnot_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { +vint32mf2_t test_vnot_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, + size_t vl) { return __riscv_vnot_v_i32mf2_tum(vm, vd, vs, vl); } -vint32m1_t test_vnot_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { +vint32m1_t test_vnot_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, + size_t vl) { return __riscv_vnot_v_i32m1_tum(vm, vd, vs, vl); } -vint32m2_t test_vnot_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { +vint32m2_t test_vnot_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, + size_t vl) { return __riscv_vnot_v_i32m2_tum(vm, vd, vs, vl); } -vint32m4_t test_vnot_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { +vint32m4_t test_vnot_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, + size_t vl) { return __riscv_vnot_v_i32m4_tum(vm, vd, vs, vl); } -vint32m8_t test_vnot_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { +vint32m8_t test_vnot_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, + size_t vl) { return __riscv_vnot_v_i32m8_tum(vm, vd, vs, vl); } -vint64m1_t test_vnot_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { +vint64m1_t test_vnot_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, + size_t vl) { return __riscv_vnot_v_i64m1_tum(vm, vd, vs, vl); } -vint64m2_t test_vnot_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { 
+vint64m2_t test_vnot_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, + size_t vl) { return __riscv_vnot_v_i64m2_tum(vm, vd, vs, vl); } -vint64m4_t test_vnot_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { +vint64m4_t test_vnot_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, + size_t vl) { return __riscv_vnot_v_i64m4_tum(vm, vd, vs, vl); } -vint64m8_t test_vnot_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { +vint64m8_t test_vnot_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, + size_t vl) { return __riscv_vnot_v_i64m8_tum(vm, vd, vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs, size_t vl) { +vuint8mf8_t test_vnot_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs, + size_t vl) { return __riscv_vnot_v_u8mf8_tum(vm, vd, vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs, size_t vl) { +vuint8mf4_t test_vnot_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs, + size_t vl) { return __riscv_vnot_v_u8mf4_tum(vm, vd, vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs, size_t vl) { +vuint8mf2_t test_vnot_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs, + size_t vl) { return __riscv_vnot_v_u8mf2_tum(vm, vd, vs, vl); } -vuint8m1_t test_vnot_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs, size_t vl) { +vuint8m1_t test_vnot_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs, + size_t vl) { return __riscv_vnot_v_u8m1_tum(vm, vd, vs, vl); } -vuint8m2_t test_vnot_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs, size_t vl) { +vuint8m2_t test_vnot_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs, + size_t vl) { return __riscv_vnot_v_u8m2_tum(vm, vd, vs, vl); } -vuint8m4_t test_vnot_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs, size_t vl) { +vuint8m4_t test_vnot_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs, + size_t vl) { return __riscv_vnot_v_u8m4_tum(vm, vd, vs, vl); } -vuint8m8_t test_vnot_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs, size_t vl) { +vuint8m8_t test_vnot_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs, + size_t vl) { return __riscv_vnot_v_u8m8_tum(vm, vd, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs, size_t vl) { +vuint16mf4_t test_vnot_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs, size_t vl) { return __riscv_vnot_v_u16mf4_tum(vm, vd, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs, size_t vl) { +vuint16mf2_t test_vnot_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs, size_t vl) { return __riscv_vnot_v_u16mf2_tum(vm, vd, vs, vl); } -vuint16m1_t test_vnot_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs, size_t vl) { +vuint16m1_t test_vnot_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs, + size_t vl) { return __riscv_vnot_v_u16m1_tum(vm, vd, vs, vl); } -vuint16m2_t test_vnot_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs, size_t vl) { +vuint16m2_t test_vnot_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs, + size_t vl) { return __riscv_vnot_v_u16m2_tum(vm, vd, vs, vl); } -vuint16m4_t test_vnot_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs, size_t vl) { +vuint16m4_t test_vnot_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs, + size_t vl) { return __riscv_vnot_v_u16m4_tum(vm, vd, vs, vl); } -vuint16m8_t test_vnot_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs, 
size_t vl) { +vuint16m8_t test_vnot_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs, + size_t vl) { return __riscv_vnot_v_u16m8_tum(vm, vd, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs, size_t vl) { +vuint32mf2_t test_vnot_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs, size_t vl) { return __riscv_vnot_v_u32mf2_tum(vm, vd, vs, vl); } -vuint32m1_t test_vnot_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs, size_t vl) { +vuint32m1_t test_vnot_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs, + size_t vl) { return __riscv_vnot_v_u32m1_tum(vm, vd, vs, vl); } -vuint32m2_t test_vnot_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs, size_t vl) { +vuint32m2_t test_vnot_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs, + size_t vl) { return __riscv_vnot_v_u32m2_tum(vm, vd, vs, vl); } -vuint32m4_t test_vnot_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs, size_t vl) { +vuint32m4_t test_vnot_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs, + size_t vl) { return __riscv_vnot_v_u32m4_tum(vm, vd, vs, vl); } -vuint32m8_t test_vnot_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs, size_t vl) { +vuint32m8_t test_vnot_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs, + size_t vl) { return __riscv_vnot_v_u32m8_tum(vm, vd, vs, vl); } -vuint64m1_t test_vnot_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs, size_t vl) { +vuint64m1_t test_vnot_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs, + size_t vl) { return __riscv_vnot_v_u64m1_tum(vm, vd, vs, vl); } -vuint64m2_t test_vnot_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs, size_t vl) { +vuint64m2_t test_vnot_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs, + size_t vl) { return __riscv_vnot_v_u64m2_tum(vm, vd, vs, vl); } -vuint64m4_t test_vnot_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs, size_t vl) { +vuint64m4_t test_vnot_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs, + size_t vl) { return __riscv_vnot_v_u64m4_tum(vm, vd, vs, vl); } -vuint64m8_t test_vnot_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs, size_t vl) { +vuint64m8_t test_vnot_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs, + size_t vl) { return __riscv_vnot_v_u64m8_tum(vm, vd, vs, vl); } -vint8mf8_t test_vnot_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { +vint8mf8_t test_vnot_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, + size_t vl) { return __riscv_vnot_v_i8mf8_tumu(vm, vd, vs, vl); } -vint8mf4_t test_vnot_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { +vint8mf4_t test_vnot_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, + size_t vl) { return __riscv_vnot_v_i8mf4_tumu(vm, vd, vs, vl); } -vint8mf2_t test_vnot_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { +vint8mf2_t test_vnot_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, + size_t vl) { return __riscv_vnot_v_i8mf2_tumu(vm, vd, vs, vl); } -vint8m1_t test_vnot_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { +vint8m1_t test_vnot_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, + size_t vl) { return __riscv_vnot_v_i8m1_tumu(vm, vd, vs, vl); } -vint8m2_t test_vnot_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { +vint8m2_t test_vnot_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, + size_t vl) { return __riscv_vnot_v_i8m2_tumu(vm, vd, vs, vl); } -vint8m4_t test_vnot_v_i8m4_tumu(vbool2_t vm, vint8m4_t 
vd, vint8m4_t vs, size_t vl) { +vint8m4_t test_vnot_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, + size_t vl) { return __riscv_vnot_v_i8m4_tumu(vm, vd, vs, vl); } -vint8m8_t test_vnot_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { +vint8m8_t test_vnot_v_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, + size_t vl) { return __riscv_vnot_v_i8m8_tumu(vm, vd, vs, vl); } -vint16mf4_t test_vnot_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { +vint16mf4_t test_vnot_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs, size_t vl) { return __riscv_vnot_v_i16mf4_tumu(vm, vd, vs, vl); } -vint16mf2_t test_vnot_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { +vint16mf2_t test_vnot_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs, size_t vl) { return __riscv_vnot_v_i16mf2_tumu(vm, vd, vs, vl); } -vint16m1_t test_vnot_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { +vint16m1_t test_vnot_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, + size_t vl) { return __riscv_vnot_v_i16m1_tumu(vm, vd, vs, vl); } -vint16m2_t test_vnot_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { +vint16m2_t test_vnot_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, + size_t vl) { return __riscv_vnot_v_i16m2_tumu(vm, vd, vs, vl); } -vint16m4_t test_vnot_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { +vint16m4_t test_vnot_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, + size_t vl) { return __riscv_vnot_v_i16m4_tumu(vm, vd, vs, vl); } -vint16m8_t test_vnot_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { +vint16m8_t test_vnot_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, + size_t vl) { return __riscv_vnot_v_i16m8_tumu(vm, vd, vs, vl); } -vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { +vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs, size_t vl) { return __riscv_vnot_v_i32mf2_tumu(vm, vd, vs, vl); } -vint32m1_t test_vnot_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { +vint32m1_t test_vnot_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, + size_t vl) { return __riscv_vnot_v_i32m1_tumu(vm, vd, vs, vl); } -vint32m2_t test_vnot_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { +vint32m2_t test_vnot_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, + size_t vl) { return __riscv_vnot_v_i32m2_tumu(vm, vd, vs, vl); } -vint32m4_t test_vnot_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { +vint32m4_t test_vnot_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, + size_t vl) { return __riscv_vnot_v_i32m4_tumu(vm, vd, vs, vl); } -vint32m8_t test_vnot_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { +vint32m8_t test_vnot_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, + size_t vl) { return __riscv_vnot_v_i32m8_tumu(vm, vd, vs, vl); } -vint64m1_t test_vnot_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { +vint64m1_t test_vnot_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, + size_t vl) { return __riscv_vnot_v_i64m1_tumu(vm, vd, vs, vl); } -vint64m2_t test_vnot_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { +vint64m2_t test_vnot_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, + size_t vl) { return __riscv_vnot_v_i64m2_tumu(vm, vd, vs, vl); } -vint64m4_t test_vnot_v_i64m4_tumu(vbool16_t 
vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { +vint64m4_t test_vnot_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, + size_t vl) { return __riscv_vnot_v_i64m4_tumu(vm, vd, vs, vl); } -vint64m8_t test_vnot_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { +vint64m8_t test_vnot_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, + size_t vl) { return __riscv_vnot_v_i64m8_tumu(vm, vd, vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs, size_t vl) { +vuint8mf8_t test_vnot_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs, + size_t vl) { return __riscv_vnot_v_u8mf8_tumu(vm, vd, vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs, size_t vl) { +vuint8mf4_t test_vnot_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs, + size_t vl) { return __riscv_vnot_v_u8mf4_tumu(vm, vd, vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs, size_t vl) { +vuint8mf2_t test_vnot_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs, + size_t vl) { return __riscv_vnot_v_u8mf2_tumu(vm, vd, vs, vl); } -vuint8m1_t test_vnot_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs, size_t vl) { +vuint8m1_t test_vnot_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs, + size_t vl) { return __riscv_vnot_v_u8m1_tumu(vm, vd, vs, vl); } -vuint8m2_t test_vnot_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs, size_t vl) { +vuint8m2_t test_vnot_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs, + size_t vl) { return __riscv_vnot_v_u8m2_tumu(vm, vd, vs, vl); } -vuint8m4_t test_vnot_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs, size_t vl) { +vuint8m4_t test_vnot_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs, + size_t vl) { return __riscv_vnot_v_u8m4_tumu(vm, vd, vs, vl); } -vuint8m8_t test_vnot_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs, size_t vl) { +vuint8m8_t test_vnot_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs, + size_t vl) { return __riscv_vnot_v_u8m8_tumu(vm, vd, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs, size_t vl) { +vuint16mf4_t test_vnot_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs, size_t vl) { return __riscv_vnot_v_u16mf4_tumu(vm, vd, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs, size_t vl) { +vuint16mf2_t test_vnot_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs, size_t vl) { return __riscv_vnot_v_u16mf2_tumu(vm, vd, vs, vl); } -vuint16m1_t test_vnot_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs, size_t vl) { +vuint16m1_t test_vnot_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs, + size_t vl) { return __riscv_vnot_v_u16m1_tumu(vm, vd, vs, vl); } -vuint16m2_t test_vnot_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs, size_t vl) { +vuint16m2_t test_vnot_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs, + size_t vl) { return __riscv_vnot_v_u16m2_tumu(vm, vd, vs, vl); } -vuint16m4_t test_vnot_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs, size_t vl) { +vuint16m4_t test_vnot_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs, + size_t vl) { return __riscv_vnot_v_u16m4_tumu(vm, vd, vs, vl); } -vuint16m8_t test_vnot_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs, size_t vl) { +vuint16m8_t test_vnot_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs, + size_t vl) { return __riscv_vnot_v_u16m8_tumu(vm, 
vd, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs, size_t vl) { +vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs, size_t vl) { return __riscv_vnot_v_u32mf2_tumu(vm, vd, vs, vl); } -vuint32m1_t test_vnot_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs, size_t vl) { +vuint32m1_t test_vnot_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs, + size_t vl) { return __riscv_vnot_v_u32m1_tumu(vm, vd, vs, vl); } -vuint32m2_t test_vnot_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs, size_t vl) { +vuint32m2_t test_vnot_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs, + size_t vl) { return __riscv_vnot_v_u32m2_tumu(vm, vd, vs, vl); } -vuint32m4_t test_vnot_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs, size_t vl) { +vuint32m4_t test_vnot_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs, + size_t vl) { return __riscv_vnot_v_u32m4_tumu(vm, vd, vs, vl); } -vuint32m8_t test_vnot_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs, size_t vl) { +vuint32m8_t test_vnot_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs, + size_t vl) { return __riscv_vnot_v_u32m8_tumu(vm, vd, vs, vl); } -vuint64m1_t test_vnot_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs, size_t vl) { +vuint64m1_t test_vnot_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs, + size_t vl) { return __riscv_vnot_v_u64m1_tumu(vm, vd, vs, vl); } -vuint64m2_t test_vnot_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs, size_t vl) { +vuint64m2_t test_vnot_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs, + size_t vl) { return __riscv_vnot_v_u64m2_tumu(vm, vd, vs, vl); } -vuint64m4_t test_vnot_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs, size_t vl) { +vuint64m4_t test_vnot_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs, + size_t vl) { return __riscv_vnot_v_u64m4_tumu(vm, vd, vs, vl); } -vuint64m8_t test_vnot_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs, size_t vl) { +vuint64m8_t test_vnot_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs, + size_t vl) { return __riscv_vnot_v_u64m8_tumu(vm, vd, vs, vl); } -vint8mf8_t test_vnot_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, size_t vl) { +vint8mf8_t test_vnot_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs, + size_t vl) { return __riscv_vnot_v_i8mf8_mu(vm, vd, vs, vl); } -vint8mf4_t test_vnot_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, size_t vl) { +vint8mf4_t test_vnot_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs, + size_t vl) { return __riscv_vnot_v_i8mf4_mu(vm, vd, vs, vl); } -vint8mf2_t test_vnot_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, size_t vl) { +vint8mf2_t test_vnot_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs, + size_t vl) { return __riscv_vnot_v_i8mf2_mu(vm, vd, vs, vl); } -vint8m1_t test_vnot_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, size_t vl) { +vint8m1_t test_vnot_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs, + size_t vl) { return __riscv_vnot_v_i8m1_mu(vm, vd, vs, vl); } -vint8m2_t test_vnot_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, size_t vl) { +vint8m2_t test_vnot_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs, + size_t vl) { return __riscv_vnot_v_i8m2_mu(vm, vd, vs, vl); } -vint8m4_t test_vnot_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, size_t vl) { +vint8m4_t test_vnot_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs, + size_t vl) { return 
__riscv_vnot_v_i8m4_mu(vm, vd, vs, vl); } -vint8m8_t test_vnot_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, size_t vl) { +vint8m8_t test_vnot_v_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs, + size_t vl) { return __riscv_vnot_v_i8m8_mu(vm, vd, vs, vl); } -vint16mf4_t test_vnot_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, size_t vl) { +vint16mf4_t test_vnot_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs, + size_t vl) { return __riscv_vnot_v_i16mf4_mu(vm, vd, vs, vl); } -vint16mf2_t test_vnot_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, size_t vl) { +vint16mf2_t test_vnot_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs, + size_t vl) { return __riscv_vnot_v_i16mf2_mu(vm, vd, vs, vl); } -vint16m1_t test_vnot_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, size_t vl) { +vint16m1_t test_vnot_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs, + size_t vl) { return __riscv_vnot_v_i16m1_mu(vm, vd, vs, vl); } -vint16m2_t test_vnot_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, size_t vl) { +vint16m2_t test_vnot_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs, + size_t vl) { return __riscv_vnot_v_i16m2_mu(vm, vd, vs, vl); } -vint16m4_t test_vnot_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, size_t vl) { +vint16m4_t test_vnot_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs, + size_t vl) { return __riscv_vnot_v_i16m4_mu(vm, vd, vs, vl); } -vint16m8_t test_vnot_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, size_t vl) { +vint16m8_t test_vnot_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs, + size_t vl) { return __riscv_vnot_v_i16m8_mu(vm, vd, vs, vl); } -vint32mf2_t test_vnot_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, size_t vl) { +vint32mf2_t test_vnot_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs, + size_t vl) { return __riscv_vnot_v_i32mf2_mu(vm, vd, vs, vl); } -vint32m1_t test_vnot_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, size_t vl) { +vint32m1_t test_vnot_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs, + size_t vl) { return __riscv_vnot_v_i32m1_mu(vm, vd, vs, vl); } -vint32m2_t test_vnot_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, size_t vl) { +vint32m2_t test_vnot_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs, + size_t vl) { return __riscv_vnot_v_i32m2_mu(vm, vd, vs, vl); } -vint32m4_t test_vnot_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, size_t vl) { +vint32m4_t test_vnot_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs, + size_t vl) { return __riscv_vnot_v_i32m4_mu(vm, vd, vs, vl); } -vint32m8_t test_vnot_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, size_t vl) { +vint32m8_t test_vnot_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs, + size_t vl) { return __riscv_vnot_v_i32m8_mu(vm, vd, vs, vl); } -vint64m1_t test_vnot_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, size_t vl) { +vint64m1_t test_vnot_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs, + size_t vl) { return __riscv_vnot_v_i64m1_mu(vm, vd, vs, vl); } -vint64m2_t test_vnot_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, size_t vl) { +vint64m2_t test_vnot_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs, + size_t vl) { return __riscv_vnot_v_i64m2_mu(vm, vd, vs, vl); } -vint64m4_t test_vnot_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, size_t vl) { +vint64m4_t test_vnot_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs, + size_t vl) { return __riscv_vnot_v_i64m4_mu(vm, vd, vs, vl); } -vint64m8_t 
test_vnot_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, size_t vl) { +vint64m8_t test_vnot_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs, + size_t vl) { return __riscv_vnot_v_i64m8_mu(vm, vd, vs, vl); } -vuint8mf8_t test_vnot_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs, size_t vl) { +vuint8mf8_t test_vnot_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs, + size_t vl) { return __riscv_vnot_v_u8mf8_mu(vm, vd, vs, vl); } -vuint8mf4_t test_vnot_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs, size_t vl) { +vuint8mf4_t test_vnot_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs, + size_t vl) { return __riscv_vnot_v_u8mf4_mu(vm, vd, vs, vl); } -vuint8mf2_t test_vnot_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs, size_t vl) { +vuint8mf2_t test_vnot_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs, + size_t vl) { return __riscv_vnot_v_u8mf2_mu(vm, vd, vs, vl); } -vuint8m1_t test_vnot_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs, size_t vl) { +vuint8m1_t test_vnot_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs, + size_t vl) { return __riscv_vnot_v_u8m1_mu(vm, vd, vs, vl); } -vuint8m2_t test_vnot_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs, size_t vl) { +vuint8m2_t test_vnot_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs, + size_t vl) { return __riscv_vnot_v_u8m2_mu(vm, vd, vs, vl); } -vuint8m4_t test_vnot_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs, size_t vl) { +vuint8m4_t test_vnot_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs, + size_t vl) { return __riscv_vnot_v_u8m4_mu(vm, vd, vs, vl); } -vuint8m8_t test_vnot_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs, size_t vl) { +vuint8m8_t test_vnot_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs, + size_t vl) { return __riscv_vnot_v_u8m8_mu(vm, vd, vs, vl); } -vuint16mf4_t test_vnot_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs, size_t vl) { +vuint16mf4_t test_vnot_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs, size_t vl) { return __riscv_vnot_v_u16mf4_mu(vm, vd, vs, vl); } -vuint16mf2_t test_vnot_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs, size_t vl) { +vuint16mf2_t test_vnot_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs, size_t vl) { return __riscv_vnot_v_u16mf2_mu(vm, vd, vs, vl); } -vuint16m1_t test_vnot_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs, size_t vl) { +vuint16m1_t test_vnot_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs, + size_t vl) { return __riscv_vnot_v_u16m1_mu(vm, vd, vs, vl); } -vuint16m2_t test_vnot_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs, size_t vl) { +vuint16m2_t test_vnot_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs, + size_t vl) { return __riscv_vnot_v_u16m2_mu(vm, vd, vs, vl); } -vuint16m4_t test_vnot_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs, size_t vl) { +vuint16m4_t test_vnot_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs, + size_t vl) { return __riscv_vnot_v_u16m4_mu(vm, vd, vs, vl); } -vuint16m8_t test_vnot_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs, size_t vl) { +vuint16m8_t test_vnot_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs, + size_t vl) { return __riscv_vnot_v_u16m8_mu(vm, vd, vs, vl); } -vuint32mf2_t test_vnot_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs, size_t vl) { +vuint32mf2_t test_vnot_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs, size_t vl) { return __riscv_vnot_v_u32mf2_mu(vm, vd, vs, vl); } -vuint32m1_t 
test_vnot_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs, size_t vl) { +vuint32m1_t test_vnot_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs, + size_t vl) { return __riscv_vnot_v_u32m1_mu(vm, vd, vs, vl); } -vuint32m2_t test_vnot_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs, size_t vl) { +vuint32m2_t test_vnot_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs, + size_t vl) { return __riscv_vnot_v_u32m2_mu(vm, vd, vs, vl); } -vuint32m4_t test_vnot_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs, size_t vl) { +vuint32m4_t test_vnot_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs, + size_t vl) { return __riscv_vnot_v_u32m4_mu(vm, vd, vs, vl); } -vuint32m8_t test_vnot_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs, size_t vl) { +vuint32m8_t test_vnot_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs, + size_t vl) { return __riscv_vnot_v_u32m8_mu(vm, vd, vs, vl); } -vuint64m1_t test_vnot_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs, size_t vl) { +vuint64m1_t test_vnot_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs, + size_t vl) { return __riscv_vnot_v_u64m1_mu(vm, vd, vs, vl); } -vuint64m2_t test_vnot_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs, size_t vl) { +vuint64m2_t test_vnot_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs, + size_t vl) { return __riscv_vnot_v_u64m2_mu(vm, vd, vs, vl); } -vuint64m4_t test_vnot_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs, size_t vl) { +vuint64m4_t test_vnot_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs, + size_t vl) { return __riscv_vnot_v_u64m4_mu(vm, vd, vs, vl); } -vuint64m8_t test_vnot_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs, size_t vl) { +vuint64m8_t test_vnot_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs, + size_t vl) { return __riscv_vnot_v_u64m8_mu(vm, vd, vs, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vnsra.c b/auto-generated/policy_funcs/llvm-api-tests/vnsra.c index 044d8eef0..49629eff0 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vnsra.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vnsra.c @@ -5,482 +5,613 @@ #include <riscv_vector.h> -vint8mf8_t test_vnsra_wv_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vnsra_wv_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vnsra_wv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vnsra_wx_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vnsra_wx_i8mf8_tu(vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vnsra_wv_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vnsra_wv_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vnsra_wv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vnsra_wx_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vnsra_wx_i8mf4_tu(vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vnsra_wv_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vnsra_wv_i8mf2_tu(vint8mf2_t vd, vint16m1_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vnsra_wv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vnsra_wx_i8mf2_tu(vint8mf2_t vd, 
vint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vnsra_wv_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vnsra_wv_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vnsra_wv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vnsra_wx_i8m1_tu(vint8m1_t vd, vint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vnsra_wv_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vnsra_wv_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vnsra_wv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vnsra_wx_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vnsra_wx_i8m2_tu(vint8m2_t vd, vint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vnsra_wv_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vnsra_wv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vnsra_wx_i8m4_tu(vint8m4_t vd, vint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i8m4_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vnsra_wv_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vnsra_wv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vnsra_wx_i16mf4_tu(vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vnsra_wv_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vnsra_wv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vnsra_wx_i16mf2_tu(vint16mf2_t vd, vint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vnsra_wv_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vnsra_wv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vnsra_wx_i16m1_tu(vint16m1_t vd, vint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vnsra_wv_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vnsra_wv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vnsra_wx_i16m2_tu(vint16m2_t vd, vint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, 
vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vnsra_wv_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vnsra_wv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vnsra_wx_i16m4_tu(vint16m4_t vd, vint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i16m4_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vnsra_wv_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vnsra_wv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vnsra_wx_i32mf2_tu(vint32mf2_t vd, vint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vnsra_wv_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vnsra_wv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vnsra_wx_i32m1_tu(vint32m1_t vd, vint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vnsra_wv_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vnsra_wv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vnsra_wx_i32m2_tu(vint32m2_t vd, vint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vnsra_wv_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vnsra_wv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vnsra_wx_i32m4_tu(vint32m4_t vd, vint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsra_wx_i32m4_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vnsra_wv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vnsra_wv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vnsra_wv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vnsra_wx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vnsra_wx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vnsra_wv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vnsra_wv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vnsra_wv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vnsra_wx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vnsra_wx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vnsra_wv_i8mf2_tum(vbool16_t vm, 
vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vnsra_wv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vnsra_wv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vnsra_wx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vnsra_wx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vnsra_wv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vnsra_wv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vnsra_wv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vnsra_wx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vnsra_wv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vnsra_wv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vnsra_wv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vnsra_wx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vnsra_wx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vnsra_wv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vnsra_wv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vnsra_wx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vnsra_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vnsra_wv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vnsra_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vnsra_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vnsra_wv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vnsra_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vnsra_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vnsra_wv_i16m1_tum(vm, vd, vs2, vs1, vl); } 
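Editorial aside, not part of the generated diff: a minimal sketch of what the policy suffixes exercised by these tests select. The signature matches the __riscv_vnsra_wx_i16m1_tum hunk nearby; the wrapper name is hypothetical.

/* Policy suffixes on RVV policy intrinsics:
 *   _tu   - unmasked; tail elements keep their values from vd.
 *   _tum  - masked; tail undisturbed, masked-off elements agnostic.
 *   _tumu - masked; tail undisturbed, masked-off elements keep vd.
 *   _mu   - masked; tail agnostic, masked-off elements keep vd.
 */
#include <riscv_vector.h>

vint16m1_t narrow_shift_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2,
                            size_t shift, size_t vl) {
  /* Narrowing arithmetic right shift (2*SEW -> SEW): active elements get
   * vs2 >> shift truncated to 16 bits; tail elements come from vd. */
  return __riscv_vnsra_wx_i16m1_tum(vm, vd, vs2, shift, vl);
}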
-vint16m1_t test_vnsra_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vnsra_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vnsra_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vnsra_wv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vnsra_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vnsra_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vnsra_wv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vnsra_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vnsra_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vnsra_wv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vnsra_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsra_wx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vnsra_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vnsra_wv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vnsra_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vnsra_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vnsra_wv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vnsra_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vnsra_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vnsra_wv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vnsra_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) 
{ return __riscv_vnsra_wx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vnsra_wv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vnsra_wv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vnsra_wv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vnsra_wx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vnsra_wx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vnsra_wv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vnsra_wv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vnsra_wv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vnsra_wx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vnsra_wx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vnsra_wv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vnsra_wv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vnsra_wv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vnsra_wx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vnsra_wx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vnsra_wv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vnsra_wv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vnsra_wv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vnsra_wx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vnsra_wv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vnsra_wv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vnsra_wv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vnsra_wx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vnsra_wx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vnsra_wv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vnsra_wv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vnsra_wx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vnsra_wv_i16mf4_tumu(vbool64_t vm, 
vint16mf4_t vd, + vint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vnsra_wv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vnsra_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vnsra_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vnsra_wv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vnsra_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vnsra_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vnsra_wv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vnsra_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vnsra_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vnsra_wv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vnsra_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vnsra_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vnsra_wv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vnsra_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vnsra_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vnsra_wv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vnsra_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsra_wx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vnsra_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vnsra_wv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t 
test_vnsra_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vnsra_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vnsra_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vnsra_wv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vnsra_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vnsra_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vnsra_wv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vnsra_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vnsra_wv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vnsra_wv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vnsra_wv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vnsra_wx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vnsra_wx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vnsra_wv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vnsra_wv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vnsra_wv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vnsra_wx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vnsra_wx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vnsra_wv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vnsra_wv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vnsra_wv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vnsra_wx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vnsra_wx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vnsra_wv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vnsra_wv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vnsra_wv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vnsra_wx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vnsra_wx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8m1_mu(vm, vd, 
vs2, rs1, vl); } -vint8m2_t test_vnsra_wv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vnsra_wv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vnsra_wv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vnsra_wx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vnsra_wx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vnsra_wv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vnsra_wv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vnsra_wv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vnsra_wx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vnsra_wx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vnsra_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vnsra_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vnsra_wv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vnsra_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vnsra_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vnsra_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vnsra_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vnsra_wv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vnsra_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vnsra_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vnsra_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vnsra_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vnsra_wv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vnsra_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vnsra_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vnsra_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vnsra_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vnsra_wv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vnsra_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vnsra_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vnsra_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vnsra_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return 
__riscv_vnsra_wv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vnsra_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vnsra_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vnsra_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vnsra_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vnsra_wv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vnsra_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vnsra_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsra_wx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vnsra_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vnsra_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vnsra_wv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vnsra_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vnsra_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vnsra_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vnsra_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vnsra_wv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vnsra_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vnsra_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vnsra_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vnsra_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vnsra_wv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vnsra_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vnsra_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsra_wx_i32m4_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vnsrl.c b/auto-generated/policy_funcs/llvm-api-tests/vnsrl.c index a1ab49a37..09652f43a 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vnsrl.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vnsrl.c @@ -5,482 +5,636 @@ #include <riscv_vector.h> -vuint8mf8_t test_vnsrl_wv_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vnsrl_wv_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vnsrl_wv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vnsrl_wx_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vnsrl_wx_u8mf8_tu(vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vnsrl_wv_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vnsrl_wv_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, + vuint8mf4_t vs1, size_t vl) { return 
__riscv_vnsrl_wv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vnsrl_wx_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vnsrl_wx_u8mf4_tu(vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vnsrl_wv_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vnsrl_wv_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vnsrl_wv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vnsrl_wx_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vnsrl_wx_u8mf2_tu(vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vnsrl_wv_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vnsrl_wv_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vnsrl_wx_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vnsrl_wx_u8m1_tu(vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vnsrl_wv_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vnsrl_wv_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vnsrl_wx_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vnsrl_wx_u8m2_tu(vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vnsrl_wv_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vnsrl_wv_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vnsrl_wx_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vnsrl_wx_u8m4_tu(vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u8m4_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vnsrl_wv_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vnsrl_wv_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vnsrl_wv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vnsrl_wx_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vnsrl_wx_u16mf4_tu(vuint16mf4_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vnsrl_wv_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vnsrl_wv_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vnsrl_wv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vnsrl_wx_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vnsrl_wx_u16mf2_tu(vuint16mf2_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vnsrl_wv_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vnsrl_wv_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vnsrl_wv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t 
test_vnsrl_wx_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vnsrl_wx_u16m1_tu(vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vnsrl_wv_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vnsrl_wv_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vnsrl_wv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vnsrl_wx_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vnsrl_wx_u16m2_tu(vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vnsrl_wv_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vnsrl_wv_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vnsrl_wv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vnsrl_wx_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vnsrl_wx_u16m4_tu(vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u16m4_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vnsrl_wv_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vnsrl_wv_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vnsrl_wv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vnsrl_wx_u32mf2_tu(vuint32mf2_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vnsrl_wv_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vnsrl_wv_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vnsrl_wv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vnsrl_wx_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vnsrl_wx_u32m1_tu(vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vnsrl_wv_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vnsrl_wv_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vnsrl_wv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vnsrl_wx_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vnsrl_wx_u32m2_tu(vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vnsrl_wv_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vnsrl_wv_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vnsrl_wv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vnsrl_wx_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vnsrl_wx_u32m4_tu(vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u32m4_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vnsrl_wv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vnsrl_wv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t 
test_vnsrl_wx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vnsrl_wx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vnsrl_wv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vnsrl_wv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vnsrl_wx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vnsrl_wx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vnsrl_wv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vnsrl_wv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vnsrl_wx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vnsrl_wx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vnsrl_wv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vnsrl_wv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vnsrl_wv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vnsrl_wx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vnsrl_wx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vnsrl_wv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vnsrl_wv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vnsrl_wv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vnsrl_wx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vnsrl_wx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vnsrl_wv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vnsrl_wv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vnsrl_wv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vnsrl_wx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vnsrl_wx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vnsrl_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vnsrl_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vnsrl_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vnsrl_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + 
vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vnsrl_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vnsrl_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vnsrl_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vnsrl_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vnsrl_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vnsrl_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vnsrl_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vnsrl_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vnsrl_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vnsrl_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vnsrl_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vnsrl_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vnsrl_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vnsrl_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vnsrl_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vnsrl_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vnsrl_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vnsrl_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vnsrl_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vnsrl_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vnsrl_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vnsrl_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vnsrl_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vnsrl_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u32m1_tum(vm, vd, vs2, rs1, vl); } 
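Editorial aside, not part of the generated diff: a hedged usage sketch for vnsrl, the narrowing logical right shift these hunks cover. Names below are illustrative, and the loop uses the unmasked, policy-free form of the intrinsic for brevity.

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Pack the upper 16 bits of each 32-bit element of src into dst. */
void pack_high_halves(uint16_t *dst, const uint32_t *src, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m2(n - i);            /* strip-mining */
    vuint32m2_t w = __riscv_vle32_v_u32m2(src + i, vl);
    vuint16m1_t hi = __riscv_vnsrl_wx_u16m1(w, 16, vl); /* w >> 16, narrowed */
    __riscv_vse16_v_u16m1(dst + i, hi, vl);
    i += vl;
  }
}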
-vuint32m2_t test_vnsrl_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vnsrl_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vnsrl_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vnsrl_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vnsrl_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vnsrl_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vnsrl_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vnsrl_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vnsrl_wv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vnsrl_wv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vnsrl_wx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vnsrl_wx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vnsrl_wv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vnsrl_wv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vnsrl_wx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vnsrl_wx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vnsrl_wv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vnsrl_wv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vnsrl_wx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vnsrl_wx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vnsrl_wv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vnsrl_wv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vnsrl_wv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vnsrl_wx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vnsrl_wx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vnsrl_wv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t 
test_vnsrl_wv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vnsrl_wv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vnsrl_wx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vnsrl_wx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vnsrl_wv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vnsrl_wv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vnsrl_wv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vnsrl_wx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vnsrl_wx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vnsrl_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vnsrl_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vnsrl_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vnsrl_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vnsrl_wx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vnsrl_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vnsrl_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vnsrl_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vnsrl_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vnsrl_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vnsrl_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vnsrl_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vnsrl_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vnsrl_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vnsrl_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vnsrl_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vnsrl_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vnsrl_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vnsrl_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) 
{ return __riscv_vnsrl_wv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vnsrl_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vnsrl_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vnsrl_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vnsrl_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vnsrl_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vnsrl_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vnsrl_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vnsrl_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vnsrl_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vnsrl_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vnsrl_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vnsrl_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vnsrl_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vnsrl_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vnsrl_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vnsrl_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vnsrl_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vnsrl_wv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vnsrl_wv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vnsrl_wx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vnsrl_wx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vnsrl_wv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vnsrl_wv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t 
test_vnsrl_wx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vnsrl_wx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vnsrl_wv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vnsrl_wv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vnsrl_wx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vnsrl_wx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vnsrl_wv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vnsrl_wv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vnsrl_wv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vnsrl_wx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vnsrl_wx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vnsrl_wv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vnsrl_wv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vnsrl_wv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vnsrl_wx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vnsrl_wx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vnsrl_wv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vnsrl_wv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vnsrl_wv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vnsrl_wx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vnsrl_wx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vnsrl_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vnsrl_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vnsrl_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vnsrl_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vnsrl_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vnsrl_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vnsrl_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vnsrl_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint32m1_t vs2, size_t rs1, size_t 
vl) { return __riscv_vnsrl_wx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vnsrl_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vnsrl_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vnsrl_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vnsrl_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vnsrl_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vnsrl_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vnsrl_wv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vnsrl_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vnsrl_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vnsrl_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vnsrl_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vnsrl_wv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vnsrl_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vnsrl_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vnsrl_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vnsrl_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vnsrl_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vnsrl_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vnsrl_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vnsrl_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vnsrl_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vnsrl_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vnsrl_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vnsrl_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vnsrl_wv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vnsrl_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vnsrl_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vnsrl_wx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vnsrl_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, vuint32m4_t vs1, 
size_t vl) {
+vuint32m4_t test_vnsrl_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2,
+                                   vuint32m4_t vs1, size_t vl) {
   return __riscv_vnsrl_wv_u32m4_mu(vm, vd, vs2, vs1, vl);
 }
 
-vuint32m4_t test_vnsrl_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) {
+vuint32m4_t test_vnsrl_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint64m8_t vs2,
+                                   size_t rs1, size_t vl) {
   return __riscv_vnsrl_wx_u32m4_mu(vm, vd, vs2, rs1, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vor.c b/auto-generated/policy_funcs/llvm-api-tests/vor.c
index 2be7f3bfe..fc6a30a00 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vor.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vor.c
@@ -5,1410 +5,1789 @@
 #include <riscv_vector.h>
 
-vint8mf8_t test_vor_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+vint8mf8_t test_vor_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1,
+                                size_t vl) {
   return __riscv_vor_vv_i8mf8_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf8_t test_vor_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+vint8mf8_t test_vor_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1,
+                                size_t vl) {
   return __riscv_vor_vx_i8mf8_tu(vd, vs2, rs1, vl);
 }
 
-vint8mf4_t test_vor_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+vint8mf4_t test_vor_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1,
+                                size_t vl) {
   return __riscv_vor_vv_i8mf4_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf4_t test_vor_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+vint8mf4_t test_vor_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1,
+                                size_t vl) {
   return __riscv_vor_vx_i8mf4_tu(vd, vs2, rs1, vl);
 }
 
-vint8mf2_t test_vor_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+vint8mf2_t test_vor_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1,
+                                size_t vl) {
   return __riscv_vor_vv_i8mf2_tu(vd, vs2, vs1, vl);
 }
 
-vint8mf2_t test_vor_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+vint8mf2_t test_vor_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1,
+                                size_t vl) {
   return __riscv_vor_vx_i8mf2_tu(vd, vs2, rs1, vl);
 }
 
-vint8m1_t test_vor_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+vint8m1_t test_vor_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1,
+                              size_t vl) {
   return __riscv_vor_vv_i8m1_tu(vd, vs2, vs1, vl);
 }
 
-vint8m1_t test_vor_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+vint8m1_t test_vor_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1,
+                              size_t vl) {
   return __riscv_vor_vx_i8m1_tu(vd, vs2, rs1, vl);
 }
 
-vint8m2_t test_vor_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+vint8m2_t test_vor_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1,
+                              size_t vl) {
   return __riscv_vor_vv_i8m2_tu(vd, vs2, vs1, vl);
 }
 
-vint8m2_t test_vor_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+vint8m2_t test_vor_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1,
+                              size_t vl) {
   return __riscv_vor_vx_i8m2_tu(vd, vs2, rs1, vl);
 }
 
-vint8m4_t test_vor_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+vint8m4_t test_vor_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1,
+                              size_t vl) {
   return __riscv_vor_vv_i8m4_tu(vd, vs2, vs1, vl);
 }
 
-vint8m4_t test_vor_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+vint8m4_t test_vor_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1,
+                              size_t vl) {
   return __riscv_vor_vx_i8m4_tu(vd, vs2, rs1, vl);
 }
 
-vint8m8_t
test_vor_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vor_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vor_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vor_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vor_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vor_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vor_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vor_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vor_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vor_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vor_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vor_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vor_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vor_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vor_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vor_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vor_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vor_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vor_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vor_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vor_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vor_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vor_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vor_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vor_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vor_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vor_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vor_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vor_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vor_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vor_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vor_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vor_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vor_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vor_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vor_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vor_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vor_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vor_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vor_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vor_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vor_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vor_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vor_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return 
__riscv_vor_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vor_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vor_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vor_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vor_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vor_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vor_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vor_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vor_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vor_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vor_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vor_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vor_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vor_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vor_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vor_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vor_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vor_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vor_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vor_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vor_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vor_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vor_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vor_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vor_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vor_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vor_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vor_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vor_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vor_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vor_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vor_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vor_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vor_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vor_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vor_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { 
return __riscv_vor_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vor_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vor_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vor_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vor_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vor_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vor_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vor_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vor_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vor_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vor_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vor_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vor_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vor_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vor_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vor_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vor_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vor_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vor_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vor_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vor_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vor_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vor_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vor_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vor_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vor_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vor_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vor_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vor_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vor_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, 
vuint8m8_t vs1, + size_t vl) { return __riscv_vor_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vor_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vor_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vor_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vor_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vor_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vor_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vor_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vor_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vor_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vor_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vor_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vor_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vor_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vor_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vor_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vor_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vor_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vor_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vor_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vor_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vor_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vor_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vor_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vor_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vor_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vor_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vor_vv_u32mf2_tu(vd, vs2, vs1, vl); } 
-vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vor_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vor_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vor_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vor_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vor_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vor_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vor_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vor_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vor_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vor_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vor_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vor_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vor_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vor_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vor_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vor_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vor_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vor_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vor_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vor_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vor_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vor_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vor_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vor_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vor_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vor_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vor_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t 
test_vor_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vor_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vor_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vor_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vor_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vor_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vor_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vor_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vor_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vor_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vor_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vor_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vor_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vor_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vor_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vor_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vor_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vor_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vor_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vor_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vor_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vor_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vor_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vor_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vor_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vor_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vor_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vor_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vor_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vor_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vor_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vor_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vor_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vor_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } 
-vint8m4_t test_vor_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vor_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vor_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vor_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vor_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vor_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vor_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vor_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vor_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vor_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vor_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vor_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vor_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vor_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vor_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vor_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vor_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vor_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vor_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vor_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vor_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vor_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vor_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vor_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vor_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vor_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vor_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vor_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vor_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vor_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vor_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vor_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vor_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vor_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t 
test_vor_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vor_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vor_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vor_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vor_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vor_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vor_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vor_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vor_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vor_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vor_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vor_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vor_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vor_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vor_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vor_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vor_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vor_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vor_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vor_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vor_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vor_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vor_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vor_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vor_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vor_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vor_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t 
test_vor_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vor_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vor_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vor_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vor_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vor_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vor_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vor_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vor_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vor_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vor_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vor_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vor_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vor_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vor_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vor_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vor_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vor_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vor_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vor_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vor_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vor_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vor_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vor_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vor_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vor_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8mf2_tum(vm, vd, vs2, rs1, 
vl); } -vuint8m1_t test_vor_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vor_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vor_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vor_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vor_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vor_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vor_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vor_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vor_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vor_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vor_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vor_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vor_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vor_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vor_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vor_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vor_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vor_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vor_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vor_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vor_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return 
__riscv_vor_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vor_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vor_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vor_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vor_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vor_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vor_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vor_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vor_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vor_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vor_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vor_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vor_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vor_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vor_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vor_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vor_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vor_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vor_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t 
test_vor_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vor_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vor_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vor_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vor_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vor_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vor_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vor_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vor_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vor_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vor_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vor_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vor_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vor_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vor_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vor_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vor_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vor_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vor_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vor_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vor_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vor_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vor_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vor_vv_i8mf8_tumu(vbool64_t vm, 
vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vor_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vor_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vor_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vor_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vor_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vor_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vor_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vor_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vor_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vor_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vor_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vor_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vor_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vor_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vor_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vor_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vor_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vor_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vor_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vor_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vor_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vor_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vor_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vor_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vor_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vor_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vor_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vor_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vor_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vor_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vor_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vor_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vor_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t 
test_vor_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vor_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vor_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vor_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vor_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vor_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vor_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vor_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vor_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vor_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vor_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vor_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vor_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vor_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vor_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vor_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vor_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vor_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vor_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vor_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vor_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vor_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vor_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vor_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vor_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vor_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vor_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vor_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vor_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vor_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vor_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vor_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vor_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vor_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, 
vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vor_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vor_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vor_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vor_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vor_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vor_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vor_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vor_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vor_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vor_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vor_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vor_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vor_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vor_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vor_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vor_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vor_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vor_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vor_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vor_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vor_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vor_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vor_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vor_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vor_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vor_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t 
test_vor_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vor_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vor_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vor_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vor_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vor_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vor_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vor_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vor_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vor_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vor_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vor_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vor_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vor_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vor_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vor_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vor_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vor_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vor_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vor_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vor_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vor_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vor_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vor_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return 
__riscv_vor_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vor_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vor_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vor_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vor_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vor_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vor_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vor_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vor_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vor_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vor_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vor_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vor_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vor_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vor_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vor_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vor_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vor_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vor_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vor_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vor_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vor_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t 
test_vor_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vor_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vor_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vor_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vor_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vor_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vor_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vor_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vor_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vor_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vor_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vor_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vor_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vor_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vor_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vor_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vor_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vor_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vor_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vor_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vor_vv_u32m4_tumu(vm, vd, 
vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vor_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vor_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vor_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vor_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vor_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vor_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vor_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vor_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vor_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vor_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vor_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vor_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vor_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vor_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vor_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vor_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vor_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vor_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vor_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vor_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vor_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vor_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vor_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vor_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vor_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vor_vx_i8mf8_mu(vbool64_t vm, 
vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vor_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vor_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vor_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vor_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vor_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vor_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vor_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vor_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vor_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vor_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vor_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vor_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vor_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vor_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vor_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vor_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vor_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vor_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vor_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vor_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vor_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vor_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vor_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vor_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vor_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vor_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vor_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vor_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vor_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vor_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vor_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vor_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vor_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vor_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t 
test_vor_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vor_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vor_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vor_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vor_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vor_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vor_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vor_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vor_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vor_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vor_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vor_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vor_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vor_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vor_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vor_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vor_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vor_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vor_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vor_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vor_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vor_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vor_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vor_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vor_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vor_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vor_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vor_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vor_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vor_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vor_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vor_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vor_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t 
test_vor_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vor_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vor_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vor_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vor_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vor_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vor_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vor_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vor_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vor_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vor_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vor_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vor_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vor_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vor_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vor_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vor_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vor_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vor_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vor_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vor_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vor_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vor_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vor_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vor_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vor_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vor_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vor_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vor_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vor_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vor_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vor_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vor_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vor_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vor_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vor_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vor_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t 
vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vor_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vor_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vor_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vor_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vor_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vor_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vor_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vor_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vor_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vor_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vor_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vor_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vor_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vor_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vor_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vor_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vor_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vor_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vor_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vor_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vor_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vor_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vor_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vor_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vor_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vor_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vor_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vor_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vor_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vor_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vor_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vor_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vor_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vor_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { 
+vuint8m4_t test_vor_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vor_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vor_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vor_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vor_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vor_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vor_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vor_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vor_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vor_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vor_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vor_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vor_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vor_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vor_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vor_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vor_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vor_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vor_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vor_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vor_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vor_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vor_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vor_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vor_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vor_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vor_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vor_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vor_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vor_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vor_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vor_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vor_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vor_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, 
vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vor_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vor_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vor_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vor_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vor_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vor_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vor_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vor_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vor_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vor_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vor_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vor_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vor_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vor_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vor_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vor_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vor_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vor_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vor_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vor_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vor_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vor_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vor_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vor_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vor_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vor_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vor_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vor_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vor_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vor_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vor_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vor_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vor_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } 
-vuint64m1_t test_vor_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vor_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vor_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vor_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vor_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vor_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vor_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vor_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vor_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vor_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vor_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vor_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vor_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vor_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vor_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vor_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vor_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vor_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vor_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vor_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vor_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vor_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vor_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vor_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vredand.c b/auto-generated/policy_funcs/llvm-api-tests/vredand.c index 7c097f4d8..a1a92086c 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vredand.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vredand.c @@ -5,354 +5,486 @@ #include <riscv_vector.h> -vint8m1_t test_vredand_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_i8mf8_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_i8mf4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_i8mf2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_i8m1_i8m1_tu(vd, vs2, vs1, vl); }
-vint8m1_t test_vredand_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_i8m2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_i8m4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_i8m8_i8m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredand_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredand_vs_i16mf4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredand_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredand_vs_i16mf2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredand_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredand_vs_i16m1_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredand_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredand_vs_i16m2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredand_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredand_vs_i16m4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredand_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredand_vs_i16m8_i16m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredand_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredand_vs_i32mf2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredand_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredand_vs_i32m1_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredand_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredand_vs_i32m2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredand_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredand_vs_i32m4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t 
vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredand_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredand_vs_i32m8_i32m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredand_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredand_vs_i64m1_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredand_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredand_vs_i64m2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredand_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredand_vs_i64m4_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredand_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredand_vs_i64m8_i64m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_u8mf8_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_u8mf4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_u8mf2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_u8m1_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_u8m2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_u8m4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredand_vs_u8m8_u8m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredand_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredand_vs_u16mf4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t 
test_vredand_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredand_vs_u16mf2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredand_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredand_vs_u16m1_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredand_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredand_vs_u16m2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredand_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredand_vs_u16m4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredand_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredand_vs_u16m8_u16m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredand_vs_u32mf2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredand_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredand_vs_u32m1_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredand_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredand_vs_u32m2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredand_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredand_vs_u32m4_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredand_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredand_vs_u32m8_u32m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredand_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredand_vs_u64m1_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredand_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredand_vs_u64m2_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredand_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredand_vs_u64m4_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t 
test_vredand_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredand_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredand_vs_u64m8_u64m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, + vint8mf8_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i8mf8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, + vint8mf4_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i8mf4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, + vint8mf2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i8mf2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, + vint8m2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i8m2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, + vint8m4_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i8m4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredand_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredand_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, + vint8m8_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i8m8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredand_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, + vint16mf4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i16mf4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredand_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, + vint16mf2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i16mf2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredand_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i16m1_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredand_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, + vint16m2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i16m2_i16m1_tum(vm, vd, vs2, vs1, 
vl); } -vint16m1_t test_vredand_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredand_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, + vint16m4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i16m4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredand_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredand_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, + vint16m8_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i16m8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredand_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, + vint32mf2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i32mf2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredand_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i32m1_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredand_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, + vint32m2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i32m2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredand_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, + vint32m4_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i32m4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredand_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredand_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, + vint32m8_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i32m8_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredand_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i64m1_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredand_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, + vint64m2_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i64m2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredand_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, + vint64m4_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i64m4_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredand_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredand_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, + vint64m8_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredand_vs_i64m8_i64m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8mf8_u8m1_tum(vbool64_t vm, 
vuint8m1_t vd, + vuint8mf8_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u8mf8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, + vuint8mf4_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u8mf4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, + vuint8mf2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u8mf2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u8m1_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, + vuint8m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u8m2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, + vuint8m4_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u8m4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredand_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredand_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, + vuint8m8_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u8m8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredand_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, + vuint16mf4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u16mf4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredand_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, + vuint16mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u16mf2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredand_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u16m1_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredand_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, + vuint16m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u16m2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredand_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredand_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, + vuint16m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u16m4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t 
test_vredand_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredand_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, + vuint16m8_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u16m8_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredand_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, + vuint32mf2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u32mf2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredand_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u32m1_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredand_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, + vuint32m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u32m2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredand_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, + vuint32m4_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u32m4_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredand_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredand_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, + vuint32m8_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u32m8_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredand_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u64m1_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredand_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, + vuint64m2_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u64m2_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredand_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, + vuint64m4_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u64m4_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredand_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredand_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, + vuint64m8_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredand_vs_u64m8_u64m1_tum(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vredmax.c b/auto-generated/policy_funcs/llvm-api-tests/vredmax.c index 987c671a3..45d1dfebe 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vredmax.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vredmax.c @@ -5,178 +5,244 @@ #include <riscv_vector.h> -vint8m1_t
test_vredmax_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i8mf8_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmax_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i8mf4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmax_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i8mf2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmax_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i8m1_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmax_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i8m2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmax_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i8m4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmax_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i8m8_i8m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmax_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i16mf4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmax_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i16mf2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmax_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i16m1_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmax_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i16m2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmax_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i16m4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmax_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i16m8_i16m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, + vint32m1_t vs1, size_t vl) { return 
__riscv_vredmax_vs_i32mf2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmax_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i32m1_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmax_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i32m2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmax_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i32m4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmax_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i32m8_i32m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmax_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i64m1_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmax_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i64m2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmax_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i64m4_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmax_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredmax_vs_i64m8_i64m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmax_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, + vint8mf8_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i8mf8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmax_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, + vint8mf4_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i8mf4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmax_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, + vint8mf2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i8mf2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmax_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t 
test_vredmax_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, + vint8m2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i8m2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmax_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, + vint8m4_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i8m4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmax_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmax_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, + vint8m8_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i8m8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmax_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, + vint16mf4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i16mf4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmax_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, + vint16mf2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i16mf2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmax_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i16m1_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmax_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, + vint16m2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i16m2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmax_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, + vint16m4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i16m4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmax_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmax_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, + vint16m8_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i16m8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmax_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, + vint32mf2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i32mf2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmax_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i32m1_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmax_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, + vint32m2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i32m2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t 
test_vredmax_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmax_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, + vint32m4_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i32m4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmax_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmax_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, + vint32m8_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i32m8_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmax_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i64m1_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmax_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, + vint64m2_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i64m2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmax_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, + vint64m4_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i64m4_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmax_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmax_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, + vint64m8_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredmax_vs_i64m8_i64m1_tum(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vredmaxu.c b/auto-generated/policy_funcs/llvm-api-tests/vredmaxu.c index 649f42479..27a0040ad 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vredmaxu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vredmaxu.c @@ -5,178 +5,244 @@ #include <riscv_vector.h> -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u8mf8_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u8mf4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u8mf2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u8m1_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u8m2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2,
vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u8m4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u8m8_u8m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u16mf4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u16mf2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u16m1_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u16m2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u16m4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u16m8_u16m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u32mf2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u32m1_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u32m2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u32m4_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u32m8_u32m1_tu(vd, vs2, 
vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u64m1_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u64m2_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u64m4_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredmaxu_vs_u64m8_u64m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, + vuint8mf8_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u8mf8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, + vuint8mf4_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u8mf4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, + vuint8mf2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u8mf2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u8m1_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, + vuint8m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u8m2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, + vuint8m4_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u8m4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, + vuint8m8_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u8m8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, + vuint16mf4_t vs2, vuint16m1_t vs1, + size_t vl) { return 
__riscv_vredmaxu_vs_u16mf4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, + vuint16mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u16mf2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u16m1_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, + vuint16m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u16m2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, + vuint16m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u16m4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, + vuint16m8_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u16m8_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, + vuint32mf2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u32mf2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u32m1_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, + vuint32m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u32m2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, + vuint32m4_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u32m4_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, + vuint32m8_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u32m8_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u64m1_u64m1_tum(vm, vd, vs2, vs1, 
vl); } -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, + vuint64m2_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u64m2_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, + vuint64m4_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u64m4_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, + vuint64m8_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredmaxu_vs_u64m8_u64m1_tum(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vredmin.c b/auto-generated/policy_funcs/llvm-api-tests/vredmin.c index 2888440b3..4b920121b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vredmin.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vredmin.c @@ -5,178 +5,244 @@ #include <riscv_vector.h> -vint8m1_t test_vredmin_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i8mf8_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i8mf4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i8mf2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i8m1_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i8m2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i8m4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i8m8_i8m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmin_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i16mf4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmin_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, + vint16m1_t vs1, size_t vl) {
return __riscv_vredmin_vs_i16mf2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmin_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i16m1_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmin_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i16m2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmin_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i16m4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmin_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i16m8_i16m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmin_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i32mf2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmin_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i32m1_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmin_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i32m2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmin_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i32m4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmin_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i32m8_i32m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmin_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i64m1_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmin_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i64m2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmin_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredmin_vs_i64m4_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmin_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, + vint64m1_t vs1, size_t vl) { return 
__riscv_vredmin_vs_i64m8_i64m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, + vint8mf8_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i8mf8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, + vint8mf4_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i8mf4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, + vint8mf2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i8mf2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, + vint8m2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i8m2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, + vint8m4_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i8m4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredmin_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredmin_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, + vint8m8_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i8m8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmin_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, + vint16mf4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i16mf4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmin_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, + vint16mf2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i16mf2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmin_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i16m1_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmin_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, + vint16m2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i16m2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmin_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, + vint16m4_t 
vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i16m4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredmin_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredmin_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, + vint16m8_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i16m8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmin_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, + vint32mf2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i32mf2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmin_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i32m1_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmin_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, + vint32m2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i32m2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmin_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, + vint32m4_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i32m4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredmin_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredmin_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, + vint32m8_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i32m8_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmin_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i64m1_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmin_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, + vint64m2_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i64m2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmin_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, + vint64m4_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i64m4_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredmin_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredmin_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, + vint64m8_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredmin_vs_i64m8_i64m1_tum(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vredminu.c b/auto-generated/policy_funcs/llvm-api-tests/vredminu.c index 5bfb77f93..96ade0849 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vredminu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vredminu.c @@ -5,178 +5,244 @@ #include <riscv_vector.h> -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2,
vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u8mf8_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u8mf4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u8mf2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u8m1_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u8m2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u8m4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u8m8_u8m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u16mf4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u16mf2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredminu_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u16m1_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredminu_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u16m2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredminu_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u16m4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredminu_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u16m8_u16m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t 
test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u32mf2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredminu_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u32m1_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredminu_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u32m2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredminu_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u32m4_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredminu_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u32m8_u32m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredminu_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u64m1_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredminu_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u64m2_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredminu_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u64m4_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredminu_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredminu_vs_u64m8_u64m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, + vuint8mf8_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u8mf8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, + vuint8mf4_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u8mf4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, + vuint8mf2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u8mf2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, 
size_t vl) { +vuint8m1_t test_vredminu_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u8m1_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, + vuint8m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u8m2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, + vuint8m4_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u8m4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredminu_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredminu_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, + vuint8m8_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u8m8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredminu_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, + vuint16mf4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u16mf4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredminu_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, + vuint16mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u16mf2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredminu_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u16m1_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredminu_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, + vuint16m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u16m2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredminu_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, + vuint16m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u16m4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredminu_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredminu_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, + vuint16m8_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u16m8_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredminu_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, + vuint32mf2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u32mf2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredminu_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t 
vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u32m1_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredminu_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, + vuint32m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u32m2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredminu_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, + vuint32m4_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u32m4_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredminu_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredminu_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, + vuint32m8_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u32m8_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredminu_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u64m1_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredminu_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, + vuint64m2_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u64m2_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredminu_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, + vuint64m4_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u64m4_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredminu_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredminu_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, + vuint64m8_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredminu_vs_u64m8_u64m1_tum(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vredor.c b/auto-generated/policy_funcs/llvm-api-tests/vredor.c index 4db6ceec7..7c6dfa7d9 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vredor.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vredor.c @@ -5,354 +5,482 @@ #include <riscv_vector.h> -vint8m1_t test_vredor_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_i8mf8_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_i8mf4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_i8mf2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8m1_i8m1_tu(vint8m1_t vd,
vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_i8m1_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_i8m2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_i8m4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_i8m8_i8m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredor_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredor_vs_i16mf4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredor_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredor_vs_i16mf2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredor_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredor_vs_i16m1_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredor_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredor_vs_i16m2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredor_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredor_vs_i16m4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredor_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredor_vs_i16m8_i16m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredor_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredor_vs_i32mf2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredor_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredor_vs_i32m1_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredor_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredor_vs_i32m2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredor_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredor_vs_i32m4_i32m1_tu(vd, vs2, vs1, vl); } 
-vint32m1_t test_vredor_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredor_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredor_vs_i32m8_i32m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredor_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredor_vs_i64m1_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredor_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredor_vs_i64m2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredor_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredor_vs_i64m4_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredor_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredor_vs_i64m8_i64m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_u8mf8_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_u8mf4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_u8mf2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_u8m1_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_u8m2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_u8m4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_u8m8_u8m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredor_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredor_vs_u16mf4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, 
size_t vl) { +vuint16m1_t test_vredor_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredor_vs_u16mf2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredor_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredor_vs_u16m1_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredor_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredor_vs_u16m2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredor_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredor_vs_u16m4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredor_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredor_vs_u16m8_u16m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredor_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredor_vs_u32mf2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredor_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredor_vs_u32m1_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredor_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredor_vs_u32m2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredor_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredor_vs_u32m4_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredor_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredor_vs_u32m8_u32m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredor_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredor_vs_u64m1_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredor_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredor_vs_u64m2_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredor_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredor_vs_u64m4_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m8_u64m1_tu(vuint64m1_t vd, 
vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredor_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredor_vs_u64m8_u64m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, + vint8mf8_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i8mf8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, + vint8mf4_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i8mf4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, + vint8mf2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i8mf2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_i8m2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_i8m4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredor_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredor_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredor_vs_i8m8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredor_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, + vint16mf4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i16mf4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredor_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, + vint16mf2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i16mf2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredor_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i16m1_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredor_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, + vint16m2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i16m2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t 
vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredor_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, + vint16m4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i16m4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredor_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredor_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, + vint16m8_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i16m8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredor_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, + vint32mf2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i32mf2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredor_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i32m1_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredor_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, + vint32m2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i32m2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredor_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, + vint32m4_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i32m4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredor_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredor_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, + vint32m8_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i32m8_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredor_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i64m1_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredor_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, + vint64m2_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i64m2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredor_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, + vint64m4_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i64m4_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredor_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredor_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, + vint64m8_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredor_vs_i64m8_i64m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, + vuint8mf8_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u8mf8_u8m1_tum(vm, vd, vs2, vs1, 
vl); } -vuint8m1_t test_vredor_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, + vuint8mf4_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u8mf4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, + vuint8mf2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u8mf2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u8m1_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, + vuint8m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u8m2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, + vuint8m4_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u8m4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredor_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredor_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, + vuint8m8_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u8m8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredor_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, + vuint16mf4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u16mf4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredor_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, + vuint16mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u16mf2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredor_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u16m1_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredor_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, + vuint16m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u16m2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredor_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, + vuint16m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u16m4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredor_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredor_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t 
vd, + vuint16m8_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u16m8_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredor_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, + vuint32mf2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u32mf2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredor_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u32m1_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredor_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, + vuint32m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u32m2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredor_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, + vuint32m4_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u32m4_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredor_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredor_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, + vuint32m8_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u32m8_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredor_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u64m1_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredor_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, + vuint64m2_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u64m2_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredor_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, + vuint64m4_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u64m4_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredor_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredor_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, + vuint64m8_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredor_vs_u64m8_u64m1_tum(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vredsum.c b/auto-generated/policy_funcs/llvm-api-tests/vredsum.c index 19cf17ccd..d341f8e56 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vredsum.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vredsum.c @@ -5,354 +5,486 @@ #include <riscv_vector.h> -vint8m1_t test_vredsum_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i8mf8_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf4_i8m1_tu(vint8m1_t vd,
vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i8mf4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i8mf2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i8m1_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i8m2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i8m4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i8m8_i8m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredsum_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i16mf4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredsum_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i16mf2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredsum_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i16m1_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredsum_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i16m2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredsum_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i16m4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredsum_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i16m8_i16m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredsum_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i32mf2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredsum_vs_i32m1_i32m1_tu(vint32m1_t 
vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i32m1_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredsum_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i32m2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredsum_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i32m4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredsum_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i32m8_i32m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredsum_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i64m1_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredsum_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i64m2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredsum_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i64m4_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredsum_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredsum_vs_i64m8_i64m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u8mf8_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u8mf4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u8mf2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u8m1_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u8m2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, + vuint8m1_t vs1, size_t vl) { return 
__riscv_vredsum_vs_u8m4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u8m8_u8m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u16mf4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u16mf2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredsum_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u16m1_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredsum_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u16m2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredsum_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u16m4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredsum_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u16m8_u16m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u32mf2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredsum_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u32m1_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredsum_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u32m2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredsum_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u32m4_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredsum_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u32m8_u32m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredsum_vs_u64m1_u64m1_tu(vuint64m1_t 
vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u64m1_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredsum_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u64m2_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredsum_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u64m4_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredsum_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredsum_vs_u64m8_u64m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, + vint8mf8_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i8mf8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, + vint8mf4_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i8mf4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, + vint8mf2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i8mf2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, + vint8m2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i8m2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, + vint8m4_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i8m4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredsum_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredsum_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, + vint8m8_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i8m8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredsum_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, + vint16mf4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i16mf4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredsum_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, + vint16mf2_t vs2, vint16m1_t vs1, + 
size_t vl) { return __riscv_vredsum_vs_i16mf2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredsum_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i16m1_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredsum_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, + vint16m2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i16m2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredsum_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, + vint16m4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i16m4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredsum_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredsum_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, + vint16m8_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i16m8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredsum_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, + vint32mf2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i32mf2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredsum_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i32m1_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredsum_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, + vint32m2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i32m2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredsum_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, + vint32m4_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i32m4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredsum_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredsum_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, + vint32m8_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i32m8_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredsum_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i64m1_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredsum_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, + vint64m2_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i64m2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t 
vs1, size_t vl) { +vint64m1_t test_vredsum_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, + vint64m4_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i64m4_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredsum_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredsum_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, + vint64m8_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_i64m8_i64m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, + vuint8mf8_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u8mf8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, + vuint8mf4_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u8mf4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, + vuint8mf2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u8mf2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u8m1_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, + vuint8m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u8m2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, + vuint8m4_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u8m4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredsum_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredsum_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, + vuint8m8_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u8m8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredsum_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, + vuint16mf4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u16mf4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredsum_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, + vuint16mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u16mf2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredsum_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return 
__riscv_vredsum_vs_u16m1_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredsum_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, + vuint16m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u16m2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredsum_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, + vuint16m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u16m4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredsum_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredsum_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, + vuint16m8_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u16m8_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredsum_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, + vuint32mf2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u32mf2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredsum_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u32m1_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredsum_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, + vuint32m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u32m2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredsum_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, + vuint32m4_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u32m4_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredsum_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredsum_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, + vuint32m8_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u32m8_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredsum_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u64m1_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredsum_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, + vuint64m2_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u64m2_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredsum_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredsum_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, + vuint64m4_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u64m4_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t 
test_vredsum_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredsum_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, + vuint64m8_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredsum_vs_u64m8_u64m1_tum(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vredxor.c b/auto-generated/policy_funcs/llvm-api-tests/vredxor.c index 157b8dd62..6082d9d51 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vredxor.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vredxor.c @@ -5,354 +5,486 @@ #include <riscv_vector.h> -vint8m1_t test_vredxor_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8mf8_i8m1_tu(vint8m1_t vd, vint8mf8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i8mf8_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8mf4_i8m1_tu(vint8m1_t vd, vint8mf4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i8mf4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8mf2_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i8mf2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8m1_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i8m1_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8m2_i8m1_tu(vint8m1_t vd, vint8m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i8m2_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8m4_i8m1_tu(vint8m1_t vd, vint8m4_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i8m4_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8m8_i8m1_tu(vint8m1_t vd, vint8m8_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i8m8_i8m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredxor_vs_i16mf4_i16m1_tu(vint16m1_t vd, vint16mf4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i16mf4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredxor_vs_i16mf2_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i16mf2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredxor_vs_i16m1_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i16m1_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredxor_vs_i16m2_i16m1_tu(vint16m1_t vd, vint16m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i16m2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16m4_i16m1_tu(vint16m1_t
vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredxor_vs_i16m4_i16m1_tu(vint16m1_t vd, vint16m4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i16m4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredxor_vs_i16m8_i16m1_tu(vint16m1_t vd, vint16m8_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i16m8_i16m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredxor_vs_i32mf2_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i32mf2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredxor_vs_i32m1_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i32m1_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredxor_vs_i32m2_i32m1_tu(vint32m1_t vd, vint32m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i32m2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredxor_vs_i32m4_i32m1_tu(vint32m1_t vd, vint32m4_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i32m4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredxor_vs_i32m8_i32m1_tu(vint32m1_t vd, vint32m8_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i32m8_i32m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredxor_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredxor_vs_i64m1_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i64m1_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredxor_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredxor_vs_i64m2_i64m1_tu(vint64m1_t vd, vint64m2_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i64m2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredxor_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredxor_vs_i64m4_i64m1_tu(vint64m1_t vd, vint64m4_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i64m4_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vredxor_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredxor_vs_i64m8_i64m1_tu(vint64m1_t vd, vint64m8_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vredxor_vs_i64m8_i64m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tu(vuint8m1_t vd, vuint8mf8_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u8mf8_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tu(vuint8m1_t vd, vuint8mf4_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u8mf4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, 
size_t vl) { +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u8mf2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8m1_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u8m1_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8m2_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u8m2_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8m4_u8m1_tu(vuint8m1_t vd, vuint8m4_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u8m4_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8m8_u8m1_tu(vuint8m1_t vd, vuint8m8_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u8m8_u8m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tu(vuint16m1_t vd, vuint16mf4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u16mf4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u16mf2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredxor_vs_u16m1_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u16m1_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredxor_vs_u16m2_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u16m2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredxor_vs_u16m4_u16m1_tu(vuint16m1_t vd, vuint16m4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u16m4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredxor_vs_u16m8_u16m1_tu(vuint16m1_t vd, vuint16m8_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u16m8_u16m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u32mf2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredxor_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u32m1_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, 
vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredxor_vs_u32m2_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u32m2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredxor_vs_u32m4_u32m1_tu(vuint32m1_t vd, vuint32m4_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u32m4_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredxor_vs_u32m8_u32m1_tu(vuint32m1_t vd, vuint32m8_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u32m8_u32m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredxor_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredxor_vs_u64m1_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u64m1_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredxor_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredxor_vs_u64m2_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u64m2_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredxor_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredxor_vs_u64m4_u64m1_tu(vuint64m1_t vd, vuint64m4_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u64m4_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vredxor_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredxor_vs_u64m8_u64m1_tu(vuint64m1_t vd, vuint64m8_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vredxor_vs_u64m8_u64m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, vint8mf8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8mf8_i8m1_tum(vbool64_t vm, vint8m1_t vd, + vint8mf8_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i8mf8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, vint8mf4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8mf4_i8m1_tum(vbool32_t vm, vint8m1_t vd, + vint8mf4_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i8mf4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8mf2_i8m1_tum(vbool16_t vm, vint8m1_t vd, + vint8mf2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i8mf2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8m1_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, vint8m2_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8m2_i8m1_tum(vbool4_t vm, vint8m1_t vd, + vint8m2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i8m2_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, vint8m4_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8m4_i8m1_tum(vbool2_t vm, vint8m1_t vd, + vint8m4_t vs2, vint8m1_t 
vs1, + size_t vl) { return __riscv_vredxor_vs_i8m4_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vredxor_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, vint8m8_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vredxor_vs_i8m8_i8m1_tum(vbool1_t vm, vint8m1_t vd, + vint8m8_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i8m8_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint16mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredxor_vs_i16mf4_i16m1_tum(vbool64_t vm, vint16m1_t vd, + vint16mf4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i16mf4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredxor_vs_i16mf2_i16m1_tum(vbool32_t vm, vint16m1_t vd, + vint16mf2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i16mf2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredxor_vs_i16m1_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i16m1_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint16m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredxor_vs_i16m2_i16m1_tum(vbool8_t vm, vint16m1_t vd, + vint16m2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i16m2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint16m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredxor_vs_i16m4_i16m1_tum(vbool4_t vm, vint16m1_t vd, + vint16m4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i16m4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vredxor_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint16m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vredxor_vs_i16m8_i16m1_tum(vbool2_t vm, vint16m1_t vd, + vint16m8_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i16m8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredxor_vs_i32mf2_i32m1_tum(vbool64_t vm, vint32m1_t vd, + vint32mf2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i32mf2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredxor_vs_i32m1_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i32m1_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint32m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredxor_vs_i32m2_i32m1_tum(vbool16_t vm, vint32m1_t vd, + vint32m2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i32m2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint32m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vredxor_vs_i32m4_i32m1_tum(vbool8_t vm, vint32m1_t vd, + vint32m4_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i32m4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vredxor_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint32m8_t vs2, vint32m1_t 
vs1, size_t vl) { +vint32m1_t test_vredxor_vs_i32m8_i32m1_tum(vbool4_t vm, vint32m1_t vd, + vint32m8_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i32m8_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredxor_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredxor_vs_i64m1_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i64m1_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredxor_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint64m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredxor_vs_i64m2_i64m1_tum(vbool32_t vm, vint64m1_t vd, + vint64m2_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i64m2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredxor_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint64m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredxor_vs_i64m4_i64m1_tum(vbool16_t vm, vint64m1_t vd, + vint64m4_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i64m4_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vredxor_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint64m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vredxor_vs_i64m8_i64m1_tum(vbool8_t vm, vint64m1_t vd, + vint64m8_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_i64m8_i64m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, vuint8mf8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8mf8_u8m1_tum(vbool64_t vm, vuint8m1_t vd, + vuint8mf8_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u8mf8_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, vuint8mf4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8mf4_u8m1_tum(vbool32_t vm, vuint8m1_t vd, + vuint8mf4_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u8mf4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8mf2_u8m1_tum(vbool16_t vm, vuint8m1_t vd, + vuint8mf2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u8mf2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8m1_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u8m1_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, vuint8m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8m2_u8m1_tum(vbool4_t vm, vuint8m1_t vd, + vuint8m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u8m2_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, vuint8m4_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8m4_u8m1_tum(vbool2_t vm, vuint8m1_t vd, + vuint8m4_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u8m4_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vredxor_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, vuint8m8_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vredxor_vs_u8m8_u8m1_tum(vbool1_t vm, vuint8m1_t vd, + vuint8m8_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u8m8_u8m1_tum(vm, vd, vs2, vs1, vl); } 
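The _tum tests above exercise the masked, tail-undisturbed form of the reduction intrinsics: vm selects which elements of vs2 participate, vs1[0] seeds the scalar accumulator, and vd supplies the tail elements that the policy leaves untouched. As a reference, a minimal usage sketch of one intrinsic covered by this diff, assuming a V-enabled toolchain; the wrapper name xor_reduce_masked is illustrative and not part of the generated test suite:

#include <riscv_vector.h>
#include <stddef.h>

// XOR-reduce vs1[0] with the vm-selected elements of vs2; the scalar
// result lands in element 0 of the return value, and under the _tum
// (tail-undisturbed, masked) policy the remaining elements are taken
// unchanged from vd.
vint8m1_t xor_reduce_masked(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
                            vint8m1_t vs1, size_t vl) {
  return __riscv_vredxor_vs_i8m1_i8m1_tum(vm, vd, vs2, vs1, vl);
}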
-vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint16mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredxor_vs_u16mf4_u16m1_tum(vbool64_t vm, vuint16m1_t vd, + vuint16mf4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u16mf4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint16mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredxor_vs_u16mf2_u16m1_tum(vbool32_t vm, vuint16m1_t vd, + vuint16mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u16mf2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredxor_vs_u16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u16m1_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint16m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredxor_vs_u16m2_u16m1_tum(vbool8_t vm, vuint16m1_t vd, + vuint16m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u16m2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint16m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredxor_vs_u16m4_u16m1_tum(vbool4_t vm, vuint16m1_t vd, + vuint16m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u16m4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vredxor_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint16m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vredxor_vs_u16m8_u16m1_tum(vbool2_t vm, vuint16m1_t vd, + vuint16m8_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u16m8_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint32mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredxor_vs_u32mf2_u32m1_tum(vbool64_t vm, vuint32m1_t vd, + vuint32mf2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u32mf2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredxor_vs_u32m1_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u32m1_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint32m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredxor_vs_u32m2_u32m1_tum(vbool16_t vm, vuint32m1_t vd, + vuint32m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u32m2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint32m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredxor_vs_u32m4_u32m1_tum(vbool8_t vm, vuint32m1_t vd, + vuint32m4_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u32m4_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vredxor_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint32m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vredxor_vs_u32m8_u32m1_tum(vbool4_t vm, vuint32m1_t vd, + vuint32m8_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u32m8_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredxor_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, 
vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredxor_vs_u64m1_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u64m1_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredxor_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint64m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredxor_vs_u64m2_u64m1_tum(vbool32_t vm, vuint64m1_t vd, + vuint64m2_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u64m2_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredxor_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint64m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredxor_vs_u64m4_u64m1_tum(vbool16_t vm, vuint64m1_t vd, + vuint64m4_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u64m4_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vredxor_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint64m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vredxor_vs_u64m8_u64m1_tum(vbool8_t vm, vuint64m1_t vd, + vuint64m8_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vredxor_vs_u64m8_u64m1_tum(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vrem.c b/auto-generated/policy_funcs/llvm-api-tests/vrem.c index 1f72bfa87..5ac98ab28 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vrem.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vrem.c @@ -5,706 +5,891 @@ #include <riscv_vector.h> -vint8mf8_t test_vrem_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vrem_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vrem_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vrem_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vrem_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vrem_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vrem_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vrem_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vrem_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vrem_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vrem_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vrem_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vrem_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vrem_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vrem_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vrem_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vrem_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vrem_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vrem_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vrem_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vrem_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vrem_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vrem_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vrem_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vrem_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vrem_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return
__riscv_vrem_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vrem_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vrem_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vrem_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vrem_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vrem_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vrem_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vrem_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vrem_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vrem_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vrem_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vrem_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vrem_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vrem_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vrem_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vrem_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vrem_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vrem_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vrem_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vrem_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vrem_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vrem_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vrem_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vrem_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vrem_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vrem_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vrem_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vrem_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vrem_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vrem_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vrem_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vrem_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vrem_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vrem_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vrem_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vrem_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vrem_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vrem_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vrem_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vrem_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vrem_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vrem_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vrem_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vrem_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vrem_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t 
rs1, + size_t vl) { return __riscv_vrem_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vrem_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vrem_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vrem_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vrem_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vrem_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vrem_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vrem_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vrem_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vrem_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vrem_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vrem_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vrem_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vrem_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vrem_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vrem_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vrem_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vrem_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vrem_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vrem_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vrem_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vrem_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vrem_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vrem_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vrem_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vrem_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vrem_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vrem_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vrem_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vrem_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vrem_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vrem_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vrem_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vrem_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vrem_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vrem_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vrem_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vrem_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vrem_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vrem_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vrem_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vrem_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vrem_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vrem_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { 
+vint64m2_t test_vrem_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vrem_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vrem_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vrem_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vrem_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vrem_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vrem_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vrem_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vrem_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vrem_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vrem_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vrem_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vrem_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vrem_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vrem_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vrem_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vrem_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vrem_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vrem_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vrem_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrem_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vrem_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrem_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vrem_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vrem_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrem_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vrem_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrem_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vrem_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vrem_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrem_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vrem_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrem_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vrem_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vrem_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrem_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vrem_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t 
test_vrem_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vrem_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vrem_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrem_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vrem_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrem_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vrem_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vrem_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrem_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vrem_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrem_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vrem_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vrem_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrem_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vrem_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrem_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vrem_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vrem_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrem_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vrem_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrem_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vrem_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vrem_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrem_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vrem_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrem_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vrem_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vrem_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrem_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vrem_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrem_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vrem_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vrem_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t 
test_vrem_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vrem_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrem_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vrem_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vrem_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrem_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vrem_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrem_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vrem_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vrem_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrem_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vrem_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrem_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vrem_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vrem_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrem_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vrem_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrem_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vrem_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vrem_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrem_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vrem_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrem_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vrem_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vrem_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrem_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vrem_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrem_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vrem_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vrem_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrem_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vrem_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32m4_tum(vm, vd, 
vs2, rs1, vl); } -vint32m8_t test_vrem_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vrem_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vrem_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrem_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vrem_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrem_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vrem_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vrem_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrem_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vrem_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrem_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrem_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vrem_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vrem_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrem_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vrem_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrem_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrem_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vrem_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vrem_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrem_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vrem_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrem_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrem_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vrem_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vrem_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrem_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vrem_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrem_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vrem_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vrem_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vrem_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrem_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vrem_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrem_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vrem_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return 
__riscv_vrem_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrem_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vrem_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrem_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vrem_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vrem_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrem_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vrem_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrem_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vrem_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vrem_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrem_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vrem_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrem_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vrem_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vrem_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrem_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vrem_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrem_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vrem_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vrem_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrem_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vrem_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrem_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vrem_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vrem_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrem_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vrem_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrem_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vrem_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vrem_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrem_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vrem_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return 
__riscv_vrem_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrem_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vrem_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vrem_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrem_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vrem_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrem_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vrem_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vrem_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrem_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vrem_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrem_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vrem_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vrem_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrem_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vrem_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrem_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vrem_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vrem_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrem_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vrem_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrem_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vrem_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vrem_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrem_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vrem_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrem_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vrem_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vrem_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrem_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vrem_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrem_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t 
test_vrem_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vrem_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrem_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vrem_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrem_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vrem_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vrem_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrem_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vrem_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrem_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vrem_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vrem_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrem_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vrem_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrem_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vrem_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vrem_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrem_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vrem_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrem_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vrem_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vrem_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrem_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vrem_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrem_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrem_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vrem_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vrem_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrem_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vrem_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrem_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrem_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vrem_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vrem_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrem_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, 
vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vrem_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrem_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrem_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vrem_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vrem_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrem_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vrem_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrem_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vrem_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vrem_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vrem_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrem_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vrem_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrem_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vrem_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vrem_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrem_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vrem_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrem_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vrem_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vrem_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrem_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vrem_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrem_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vrem_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vrem_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrem_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vrem_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrem_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vrem_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vrem_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrem_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vrem_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrem_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t 
test_vrem_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vrem_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrem_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vrem_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrem_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vrem_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vrem_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrem_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vrem_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrem_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrem_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vrem_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vrem_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrem_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vrem_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrem_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vrem_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vrem_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrem_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vrem_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrem_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vrem_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vrem_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrem_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vrem_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrem_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vrem_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vrem_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrem_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vrem_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrem_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vrem_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vrem_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrem_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t 
test_vrem_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrem_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vrem_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vrem_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrem_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vrem_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrem_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrem_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vrem_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vrem_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrem_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vrem_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrem_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vrem_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vrem_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrem_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vrem_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrem_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vrem_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vrem_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrem_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vrem_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrem_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vrem_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vrem_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrem_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vrem_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrem_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vrem_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vrem_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrem_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vrem_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrem_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrem_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t 
test_vrem_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vrem_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrem_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vrem_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrem_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrem_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vrem_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vrem_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrem_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vrem_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrem_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrem_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vrem_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vrem_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrem_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vrem_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrem_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrem_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vrem_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vrem_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrem_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vrem_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrem_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vremu.c b/auto-generated/policy_funcs/llvm-api-tests/vremu.c index ea8f3f8d6..fe92cd7e3 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vremu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vremu.c @@ -5,706 +5,939 @@ #include <riscv_vector.h> -vuint8mf8_t test_vremu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vremu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vremu_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vremu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vremu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vremu_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vremu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vremu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vremu_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vremu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vremu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vremu_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vremu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vremu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return
__riscv_vremu_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vremu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vremu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vremu_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vremu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vremu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vremu_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vremu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vremu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vremu_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vremu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vremu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vremu_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vremu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vremu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vremu_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vremu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vremu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vremu_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vremu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vremu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vremu_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vremu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vremu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vremu_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vremu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vremu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vremu_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vremu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vremu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vremu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vremu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vremu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vremu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vremu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vremu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vremu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vremu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vremu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vremu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vremu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t 
test_vremu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vremu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vremu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vremu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vremu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vremu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vremu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vremu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vremu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vremu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vremu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vremu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vremu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vremu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vremu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vremu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vremu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vremu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vremu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vremu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vremu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vremu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vremu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vremu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vremu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vremu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vremu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vremu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vremu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vremu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vremu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vremu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vremu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vremu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t 
test_vremu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vremu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vremu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vremu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vremu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vremu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vremu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vremu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vremu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vremu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vremu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vremu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vremu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vremu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vremu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vremu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vremu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vremu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vremu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vremu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vremu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vremu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vremu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vremu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vremu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vremu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vremu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vremu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vremu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vremu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vremu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vremu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vremu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vremu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { 
return __riscv_vremu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vremu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vremu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vremu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vremu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vremu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vremu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vremu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vremu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vremu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vremu_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vremu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vremu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vremu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vremu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vremu_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vremu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vremu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vremu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vremu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vremu_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vremu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vremu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vremu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vremu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vremu_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vremu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vremu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vremu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vremu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vremu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vremu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t 
test_vremu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vremu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vremu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vremu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vremu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vremu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vremu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vremu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vremu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vremu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vremu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vremu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vremu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vremu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vremu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vremu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vremu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vremu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vremu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vremu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vremu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vremu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vremu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vremu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vremu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vremu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vremu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vremu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vremu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vremu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vremu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vremu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vremu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, 
uint32_t rs1, + size_t vl) { return __riscv_vremu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vremu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vremu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vremu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vremu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vremu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vremu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vremu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vremu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vremu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vremu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vremu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vremu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vremu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vremu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vremu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vremu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vremu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vremu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vremu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vremu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vremu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vremu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vremu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vremu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vremu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vremu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vremu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vremu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vremu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vremu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t 
test_vremu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vremu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vremu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vremu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vremu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vremu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vremu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vremu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vremu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vremu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vremu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vremu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vremu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vremu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vremu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vremu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vremu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vremu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vremu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vremu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vremu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vremu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vremu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vremu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vremu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vremu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vremu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vremu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vremu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vremu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vremu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t 
test_vremu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vremu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vremu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vremu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vremu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vremu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vremu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vremu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vremu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vremu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vremu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vremu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vremu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vremu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vremu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vremu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vremu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vremu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vremu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vremu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vremu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vremu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vremu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vremu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vremu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vremu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vremu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vremu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vremu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vremu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vremu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vremu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vremu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { 
return __riscv_vremu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vremu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vremu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vremu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vremu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vremu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vremu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vremu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vremu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vremu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vremu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vremu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vremu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vremu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vremu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vremu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vremu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vremu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vremu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vremu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vremu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vremu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vremu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vremu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vremu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vremu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vremu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vremu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vremu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vremu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vremu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } 
-vuint32m4_t test_vremu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vremu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vremu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vremu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vremu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vremu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vremu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vremu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vremu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vremu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vremu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vremu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vremu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vremu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vremu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vremu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vremu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vremu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vremu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vremu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vremu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vremu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vremu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vremu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vremu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vremu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vremu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vremu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vremu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vremu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vremu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t 
rs1, size_t vl) { +vuint8mf8_t test_vremu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vremu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vremu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vremu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vremu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vremu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vremu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vremu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vremu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vremu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vremu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vremu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vremu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vremu_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vremu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vremu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vremu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vremu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vremu_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vremu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vremu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vremu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vremu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vremu_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vremu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vremu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vremu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vremu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vremu_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vremu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vremu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vremu_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vremu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, 
vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vremu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vremu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vremu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vremu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vremu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vremu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vremu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vremu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vremu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vremu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vremu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vremu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vremu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vremu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vremu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vremu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vremu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vremu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vremu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vremu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vremu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vremu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vremu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vremu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vremu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vremu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vremu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vremu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vremu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vremu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vremu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vremu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vremu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vremu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + 
vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vremu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vremu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vremu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vremu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vremu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vremu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vremu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vremu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vremu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vremu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vremu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vremu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vremu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vremu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vremu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vremu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vremu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vremu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vremu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vremu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vremu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vremu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vremu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vremu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vremu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vremu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vremu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vremu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vremu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vremu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vremu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vremu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vremu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t 
test_vremu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vremu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vremu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vremu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vremu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vremu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vremu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vremu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vremu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vremu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vremu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vremu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vremu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vrgather.c b/auto-generated/policy_funcs/llvm-api-tests/vrgather.c index f43b185b7..b1b90ee12 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vrgather.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vrgather.c @@ -1,1895 +1,2593 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vrgather_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vrgather_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgather_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vx_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vs1, size_t vl) { +vfloat16mf4_t test_vrgather_vx_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vrgather_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgather_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vx_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vs1, size_t vl) { +vfloat16mf2_t test_vrgather_vx_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat16m1_t test_vrgather_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgather_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vx_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t vs1, size_t vl) { +vfloat16m1_t
test_vrgather_vx_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat16m2_t test_vrgather_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgather_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vx_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t vs1, size_t vl) { +vfloat16m2_t test_vrgather_vx_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vfloat16m4_t test_vrgather_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgather_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vx_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t vs1, size_t vl) { +vfloat16m4_t test_vrgather_vx_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vfloat16m8_t test_vrgather_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vrgather_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vx_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t vs1, size_t vl) { +vfloat16m8_t test_vrgather_vx_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f16m8_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vrgather_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vrgather_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vx_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vs1, size_t vl) { +vfloat32mf2_t test_vrgather_vx_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vfloat32m1_t test_vrgather_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vrgather_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vx_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t vs1, size_t vl) { +vfloat32m1_t test_vrgather_vx_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vfloat32m2_t test_vrgather_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vrgather_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vx_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t vs1, size_t vl) { +vfloat32m2_t test_vrgather_vx_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vfloat32m4_t test_vrgather_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return 
__riscv_vrgather_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vx_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t vs1, size_t vl) { +vfloat32m4_t test_vrgather_vx_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vfloat32m8_t test_vrgather_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vrgather_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vx_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t vs1, size_t vl) { +vfloat32m8_t test_vrgather_vx_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f32m8_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vfloat64m1_t test_vrgather_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vrgather_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vx_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t vs1, size_t vl) { +vfloat64m1_t test_vrgather_vx_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vfloat64m2_t test_vrgather_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vrgather_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vx_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t vs1, size_t vl) { +vfloat64m2_t test_vrgather_vx_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vfloat64m4_t test_vrgather_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vrgather_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vx_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t vs1, size_t vl) { +vfloat64m4_t test_vrgather_vx_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vfloat64m8_t test_vrgather_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vrgather_vv_f64m8_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vx_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t vs1, size_t vl) { +vfloat64m8_t test_vrgather_vx_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_f64m8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vrgather_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vrgather_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, size_t vl) { +vint8mf8_t test_vrgather_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { 
+vint8mf4_t test_vrgather_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vrgather_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, size_t vl) { +vint8mf4_t test_vrgather_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vrgather_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vrgather_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, size_t vl) { +vint8mf2_t test_vrgather_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i8mf2_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vrgather_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t vs1, size_t vl) { +vint8m1_t test_vrgather_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i8m1_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vrgather_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t vs1, size_t vl) { +vint8m2_t test_vrgather_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i8m2_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vrgather_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t vs1, size_t vl) { +vint8m4_t test_vrgather_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i8m4_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vrgather_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t vs1, size_t vl) { +vint8m8_t test_vrgather_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i8m8_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vrgather_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgather_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t vs1, size_t vl) { +vint16mf4_t test_vrgather_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vrgather_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + 
vuint16mf2_t vs1, size_t vl) { return __riscv_vrgather_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t vs1, size_t vl) { +vint16mf2_t test_vrgather_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16mf2_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vrgather_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgather_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t vs1, size_t vl) { +vint16m1_t test_vrgather_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i16m1_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vrgather_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgather_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t vs1, size_t vl) { +vint16m2_t test_vrgather_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i16m2_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vrgather_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgather_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t vs1, size_t vl) { +vint16m4_t test_vrgather_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i16m4_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vrgather_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vrgather_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t vs1, size_t vl) { +vint16m8_t test_vrgather_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i16m8_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vrgather_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vrgather_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t vs1, size_t vl) { +vint32mf2_t test_vrgather_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32mf2_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vrgather_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vrgather_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t vs1, size_t vl) { +vint32m1_t test_vrgather_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i32m1_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vrgather_vv_i32m2_tu(vint32m2_t vd, vint32m2_t 
vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vrgather_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t vs1, size_t vl) { +vint32m2_t test_vrgather_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i32m2_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vrgather_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vrgather_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t vs1, size_t vl) { +vint32m4_t test_vrgather_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i32m4_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vrgather_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vrgather_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t vs1, size_t vl) { +vint32m8_t test_vrgather_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i32m8_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vrgather_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vrgather_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t vs1, size_t vl) { +vint64m1_t test_vrgather_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i64m1_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vrgather_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vrgather_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t vs1, size_t vl) { +vint64m2_t test_vrgather_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i64m2_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vrgather_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vrgather_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t vs1, size_t vl) { +vint64m4_t test_vrgather_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i64m4_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vrgather_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vrgather_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t vs1, size_t vl) { +vint64m8_t test_vrgather_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i64m8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vrgather_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t 
vs1, size_t vl) { return __riscv_vrgather_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vs1, size_t vl) { +vuint8mf8_t test_vrgather_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vrgather_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vrgather_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vs1, size_t vl) { +vuint8mf4_t test_vrgather_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vrgather_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vrgather_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vs1, size_t vl) { +vuint8mf2_t test_vrgather_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vrgather_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vrgather_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, size_t vl) { +vuint8m1_t test_vrgather_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vrgather_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vrgather_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vs1, size_t vl) { +vuint8m2_t test_vrgather_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vrgather_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vrgather_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, size_t vl) { +vuint8m4_t test_vrgather_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vrgather_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vrgather_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vs1, size_t vl) { +vuint8m8_t test_vrgather_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u8m8_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vrgather_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t 
vl) { return __riscv_vrgather_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vs1, size_t vl) { +vuint16mf4_t test_vrgather_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vrgather_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgather_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vs1, size_t vl) { +vuint16mf2_t test_vrgather_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vrgather_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgather_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vs1, size_t vl) { +vuint16m1_t test_vrgather_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vrgather_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgather_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vs1, size_t vl) { +vuint16m2_t test_vrgather_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vrgather_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgather_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vs1, size_t vl) { +vuint16m4_t test_vrgather_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vrgather_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vrgather_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vs1, size_t vl) { +vuint16m8_t test_vrgather_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16m8_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vrgather_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vrgather_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vs1, size_t vl) { +vuint32mf2_t test_vrgather_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, 
size_t vl) { +vuint32m1_t test_vrgather_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vrgather_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vs1, size_t vl) { +vuint32m1_t test_vrgather_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vrgather_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vrgather_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vs1, size_t vl) { +vuint32m2_t test_vrgather_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vrgather_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vrgather_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vs1, size_t vl) { +vuint32m4_t test_vrgather_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vrgather_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vrgather_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vs1, size_t vl) { +vuint32m8_t test_vrgather_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32m8_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vrgather_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vrgather_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vs1, size_t vl) { +vuint64m1_t test_vrgather_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vrgather_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vrgather_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vs1, size_t vl) { +vuint64m2_t test_vrgather_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vrgather_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vrgather_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vs1, size_t vl) { +vuint64m4_t test_vrgather_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m8_t 
test_vrgather_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vrgather_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vrgather_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vs1, size_t vl) { +vuint64m8_t test_vrgather_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u64m8_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vrgather_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vx_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vs1, size_t vl) { +vfloat16mf4_t test_vrgather_vx_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vrgather_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vx_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vs1, size_t vl) { +vfloat16mf2_t test_vrgather_vx_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat16m1_t test_vrgather_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vx_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vs1, size_t vl) { +vfloat16m1_t test_vrgather_vx_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat16m2_t test_vrgather_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vx_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vs1, size_t vl) { +vfloat16m2_t test_vrgather_vx_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vfloat16m4_t test_vrgather_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vx_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vs1, size_t vl) { +vfloat16m4_t test_vrgather_vx_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16m4_tum(vm, vd, vs2, vs1, vl); } 
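For context, the hunks above and below are mechanical re-wraps of generated test signatures onto multiple lines; the intrinsic calls themselves are unchanged. The _tum block that starts here covers the masked, tail-undisturbed policy variants, which take the mask vm first and the merge destination vd second. A minimal sketch of how such a variant might be used follows; the helper name and the index computation are illustrative only and are not part of the generated tests.

#include <riscv_vector.h>

// Hypothetical helper: reverse the first vl elements of vs2 under mask vm,
// merging into vd (assumes vl >= 1). Illustration only.
vint32m1_t reverse_active_i32m1(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
                                size_t vl) {
  vuint32m1_t idx = __riscv_vid_v_u32m1(vl);     // 0, 1, ..., vl-1
  idx = __riscv_vrsub_vx_u32m1(idx, vl - 1, vl); // vl-1, ..., 1, 0
  // _tum policy: tail elements (index >= vl) keep their old values from vd,
  // while masked-off elements are handled mask-agnostically.
  return __riscv_vrgather_vv_i32m1_tum(vm, vd, vs2, idx, vl);
}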
-vfloat16m8_t test_vrgather_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vfloat16m8_t test_vrgather_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vx_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vs1, size_t vl) { +vfloat16m8_t test_vrgather_vx_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vrgather_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vx_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vs1, size_t vl) { +vfloat32mf2_t test_vrgather_vx_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vfloat32m1_t test_vrgather_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vx_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vs1, size_t vl) { +vfloat32m1_t test_vrgather_vx_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vfloat32m2_t test_vrgather_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vx_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vs1, size_t vl) { +vfloat32m2_t test_vrgather_vx_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vfloat32m4_t test_vrgather_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vx_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vs1, size_t vl) { +vfloat32m4_t test_vrgather_vx_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vfloat32m8_t test_vrgather_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vx_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vs1, size_t vl) { +vfloat32m8_t test_vrgather_vx_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vs1, + size_t 
vl) { return __riscv_vrgather_vx_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vfloat64m1_t test_vrgather_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vx_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vs1, size_t vl) { +vfloat64m1_t test_vrgather_vx_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vfloat64m2_t test_vrgather_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vx_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vs1, size_t vl) { +vfloat64m2_t test_vrgather_vx_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vfloat64m4_t test_vrgather_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vx_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vs1, size_t vl) { +vfloat64m4_t test_vrgather_vx_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vfloat64m8_t test_vrgather_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vx_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vs1, size_t vl) { +vfloat64m8_t test_vrgather_vx_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f64m8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vrgather_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, size_t vl) { +vint8mf8_t test_vrgather_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vrgather_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, size_t vl) { +vint8mf4_t test_vrgather_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, size_t vs1, 
size_t vl) { return __riscv_vrgather_vx_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vrgather_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, size_t vl) { +vint8mf2_t test_vrgather_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vrgather_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vrgather_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t vs1, size_t vl) { +vint8m1_t test_vrgather_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vrgather_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vrgather_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t vs1, size_t vl) { +vint8m2_t test_vrgather_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vrgather_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vrgather_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t vs1, size_t vl) { +vint8m4_t test_vrgather_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vrgather_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vrgather_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t vs1, size_t vl) { +vint8m8_t test_vrgather_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vrgather_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t vs1, size_t vl) { +vint16mf4_t test_vrgather_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, 
size_t vl) { +vint16mf2_t test_vrgather_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t vs1, size_t vl) { +vint16mf2_t test_vrgather_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vrgather_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t vs1, size_t vl) { +vint16m1_t test_vrgather_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vrgather_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t vs1, size_t vl) { +vint16m2_t test_vrgather_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vrgather_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t vs1, size_t vl) { +vint16m4_t test_vrgather_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vrgather_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t vs1, size_t vl) { +vint16m8_t test_vrgather_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vrgather_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t vs1, size_t vl) { +vint32mf2_t test_vrgather_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vrgather_vv_i32m1_tum(vbool32_t vm, 
vint32m1_t vd, + vint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t vs1, size_t vl) { +vint32m1_t test_vrgather_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vrgather_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t vs1, size_t vl) { +vint32m2_t test_vrgather_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vrgather_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t vs1, size_t vl) { +vint32m4_t test_vrgather_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vrgather_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t vs1, size_t vl) { +vint32m8_t test_vrgather_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vrgather_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t vs1, size_t vl) { +vint64m1_t test_vrgather_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vrgather_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t vs1, size_t vl) { +vint64m2_t test_vrgather_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vrgather_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i64m4_tum(vm, 
vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t vs1, size_t vl) { +vint64m4_t test_vrgather_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vrgather_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t vs1, size_t vl) { +vint64m8_t test_vrgather_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i64m8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vrgather_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vs1, size_t vl) { +vuint8mf8_t test_vrgather_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vrgather_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vs1, size_t vl) { +vuint8mf4_t test_vrgather_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vrgather_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vs1, size_t vl) { +vuint8mf2_t test_vrgather_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vrgather_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vrgather_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, size_t vl) { +vuint8m1_t test_vrgather_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vrgather_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vrgather_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, 
vuint8m2_t vs2, size_t vs1, size_t vl) { +vuint8m2_t test_vrgather_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vrgather_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vrgather_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, size_t vl) { +vuint8m4_t test_vrgather_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vrgather_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vrgather_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vs1, size_t vl) { +vuint8m8_t test_vrgather_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vrgather_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vs1, size_t vl) { +vuint16mf4_t test_vrgather_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vrgather_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vs1, size_t vl) { +vuint16mf2_t test_vrgather_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vrgather_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vs1, size_t vl) { +vuint16m1_t test_vrgather_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vrgather_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vs1, size_t vl) { 
+vuint16m2_t test_vrgather_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vrgather_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vs1, size_t vl) { +vuint16m4_t test_vrgather_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vrgather_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vs1, size_t vl) { +vuint16m8_t test_vrgather_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vrgather_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vs1, size_t vl) { +vuint32mf2_t test_vrgather_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vrgather_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vs1, size_t vl) { +vuint32m1_t test_vrgather_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vrgather_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vs1, size_t vl) { +vuint32m2_t test_vrgather_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vrgather_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vs1, size_t vl) { +vuint32m4_t 
test_vrgather_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vrgather_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vs1, size_t vl) { +vuint32m8_t test_vrgather_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vrgather_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vs1, size_t vl) { +vuint64m1_t test_vrgather_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vrgather_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vs1, size_t vl) { +vuint64m2_t test_vrgather_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vrgather_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vs1, size_t vl) { +vuint64m4_t test_vrgather_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vrgather_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vs1, size_t vl) { +vuint64m8_t test_vrgather_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vrgather_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vx_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vs1, size_t vl) { +vfloat16mf4_t 
test_vrgather_vx_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vrgather_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vx_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vs1, size_t vl) { +vfloat16mf2_t test_vrgather_vx_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat16m1_t test_vrgather_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vx_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vs1, size_t vl) { +vfloat16m1_t test_vrgather_vx_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat16m2_t test_vrgather_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vx_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vs1, size_t vl) { +vfloat16m2_t test_vrgather_vx_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vfloat16m4_t test_vrgather_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vx_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vs1, size_t vl) { +vfloat16m4_t test_vrgather_vx_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vfloat16m8_t test_vrgather_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vx_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vs1, size_t vl) { +vfloat16m8_t test_vrgather_vx_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vrgather_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } 
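The _tumu block beginning here differs from _tum only in its mask policy: with tail-undisturbed, mask-undisturbed semantics, both tail elements and masked-off elements retain their previous values from vd. A short illustrative use, with a hypothetical helper name not taken from the generated tests:

#include <riscv_vector.h>

// Gather from vs2 only where the mask bit is set; every inactive or tail
// lane is left exactly as it was in vd. Illustration only.
vfloat32m1_t gather_selected_f32m1(vbool32_t vm, vfloat32m1_t vd,
                                   vfloat32m1_t vs2, vuint32m1_t idx,
                                   size_t vl) {
  return __riscv_vrgather_vv_f32m1_tumu(vm, vd, vs2, idx, vl);
}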
-vfloat32mf2_t test_vrgather_vx_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vs1, size_t vl) { +vfloat32mf2_t test_vrgather_vx_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vfloat32m1_t test_vrgather_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vx_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vs1, size_t vl) { +vfloat32m1_t test_vrgather_vx_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vfloat32m2_t test_vrgather_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vx_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vs1, size_t vl) { +vfloat32m2_t test_vrgather_vx_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vfloat32m4_t test_vrgather_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vx_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vs1, size_t vl) { +vfloat32m4_t test_vrgather_vx_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vfloat32m8_t test_vrgather_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vx_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vs1, size_t vl) { +vfloat32m8_t test_vrgather_vx_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vfloat64m1_t test_vrgather_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vx_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vs1, size_t vl) { +vfloat64m1_t test_vrgather_vx_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vfloat64m2_t test_vrgather_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, 
vuint64m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vx_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vs1, size_t vl) { +vfloat64m2_t test_vrgather_vx_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vfloat64m4_t test_vrgather_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vx_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vs1, size_t vl) { +vfloat64m4_t test_vrgather_vx_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vfloat64m8_t test_vrgather_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vx_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vs1, size_t vl) { +vfloat64m8_t test_vrgather_vx_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vrgather_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, size_t vl) { +vint8mf8_t test_vrgather_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vrgather_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, size_t vl) { +vint8mf4_t test_vrgather_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vrgather_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, size_t vl) { +vint8mf2_t test_vrgather_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vrgather_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint8m1_t 
vs1, size_t vl) { return __riscv_vrgather_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t vs1, size_t vl) { +vint8m1_t test_vrgather_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vrgather_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vrgather_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t vs1, size_t vl) { +vint8m2_t test_vrgather_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vrgather_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vrgather_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t vs1, size_t vl) { +vint8m4_t test_vrgather_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vrgather_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vrgather_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t vs1, size_t vl) { +vint8m8_t test_vrgather_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vrgather_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t vs1, size_t vl) { +vint16mf4_t test_vrgather_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vrgather_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t vs1, size_t vl) { +vint16mf2_t test_vrgather_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vrgather_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t 
test_vrgather_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t vs1, size_t vl) { +vint16m1_t test_vrgather_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vrgather_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t vs1, size_t vl) { +vint16m2_t test_vrgather_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vrgather_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t vs1, size_t vl) { +vint16m4_t test_vrgather_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vrgather_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t vs1, size_t vl) { +vint16m8_t test_vrgather_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vrgather_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t vs1, size_t vl) { +vint32mf2_t test_vrgather_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vrgather_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t vs1, size_t vl) { +vint32m1_t test_vrgather_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vrgather_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2_tumu(vbool16_t vm, vint32m2_t 
vd, vint32m2_t vs2, size_t vs1, size_t vl) { +vint32m2_t test_vrgather_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vrgather_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t vs1, size_t vl) { +vint32m4_t test_vrgather_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vrgather_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t vs1, size_t vl) { +vint32m8_t test_vrgather_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vrgather_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t vs1, size_t vl) { +vint64m1_t test_vrgather_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vrgather_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t vs1, size_t vl) { +vint64m2_t test_vrgather_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vrgather_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t vs1, size_t vl) { +vint64m4_t test_vrgather_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vrgather_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t vs1, size_t vl) { +vint64m8_t 
test_vrgather_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vrgather_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vs1, size_t vl) { +vuint8mf8_t test_vrgather_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vrgather_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vs1, size_t vl) { +vuint8mf4_t test_vrgather_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vrgather_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vs1, size_t vl) { +vuint8mf2_t test_vrgather_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vrgather_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, size_t vl) { +vuint8m1_t test_vrgather_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vrgather_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vs1, size_t vl) { +vuint8m2_t test_vrgather_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vrgather_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, size_t vl) { +vuint8m4_t test_vrgather_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, 
+ vuint8m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vrgather_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vs1, size_t vl) { +vuint8m8_t test_vrgather_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vrgather_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vs1, size_t vl) { +vuint16mf4_t test_vrgather_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vrgather_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vs1, size_t vl) { +vuint16mf2_t test_vrgather_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vrgather_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vs1, size_t vl) { +vuint16m1_t test_vrgather_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vrgather_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vs1, size_t vl) { +vuint16m2_t test_vrgather_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vrgather_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vs1, size_t vl) { +vuint16m4_t 
test_vrgather_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vrgather_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vs1, size_t vl) { +vuint16m8_t test_vrgather_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vrgather_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vs1, size_t vl) { +vuint32mf2_t test_vrgather_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vrgather_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vs1, size_t vl) { +vuint32m1_t test_vrgather_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vrgather_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vs1, size_t vl) { +vuint32m2_t test_vrgather_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vrgather_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vs1, size_t vl) { +vuint32m4_t test_vrgather_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vrgather_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t 
vs1, size_t vl) { +vuint32m8_t test_vrgather_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vrgather_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vs1, size_t vl) { +vuint64m1_t test_vrgather_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vrgather_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vs1, size_t vl) { +vuint64m2_t test_vrgather_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vrgather_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vs1, size_t vl) { +vuint64m4_t test_vrgather_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vrgather_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vs1, size_t vl) { +vuint64m8_t test_vrgather_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vrgather_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgather_vx_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t vs1, size_t vl) { +vfloat16mf4_t test_vrgather_vx_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgather_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vrgather_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t 
test_vrgather_vx_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t vs1, size_t vl) { +vfloat16mf2_t test_vrgather_vx_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat16m1_t test_vrgather_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgather_vx_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t vs1, size_t vl) { +vfloat16m1_t test_vrgather_vx_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat16m2_t test_vrgather_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgather_vx_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t vs1, size_t vl) { +vfloat16m2_t test_vrgather_vx_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vfloat16m4_t test_vrgather_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgather_vx_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t vs1, size_t vl) { +vfloat16m4_t test_vrgather_vx_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vfloat16m8_t test_vrgather_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgather_vx_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t vs1, size_t vl) { +vfloat16m8_t test_vrgather_vx_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vfloat32mf2_t test_vrgather_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgather_vx_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t vs1, size_t vl) { +vfloat32mf2_t test_vrgather_vx_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vfloat32m1_t test_vrgather_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return 
__riscv_vrgather_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgather_vx_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t vs1, size_t vl) { +vfloat32m1_t test_vrgather_vx_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vfloat32m2_t test_vrgather_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgather_vx_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t vs1, size_t vl) { +vfloat32m2_t test_vrgather_vx_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vfloat32m4_t test_vrgather_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgather_vx_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t vs1, size_t vl) { +vfloat32m4_t test_vrgather_vx_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vfloat32m8_t test_vrgather_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgather_vx_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t vs1, size_t vl) { +vfloat32m8_t test_vrgather_vx_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vfloat64m1_t test_vrgather_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgather_vx_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t vs1, size_t vl) { +vfloat64m1_t test_vrgather_vx_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vfloat64m2_t test_vrgather_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgather_vx_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t vs1, size_t vl) { +vfloat64m2_t test_vrgather_vx_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vfloat64m4_t test_vrgather_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vuint64m4_t vs1, + size_t 
vl) { return __riscv_vrgather_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgather_vx_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t vs1, size_t vl) { +vfloat64m4_t test_vrgather_vx_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vfloat64m8_t test_vrgather_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgather_vx_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t vs1, size_t vl) { +vfloat64m8_t test_vrgather_vx_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_f64m8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vrgather_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgather_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t vs1, size_t vl) { +vint8mf8_t test_vrgather_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vrgather_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgather_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t vs1, size_t vl) { +vint8mf4_t test_vrgather_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vrgather_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgather_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t vs1, size_t vl) { +vint8mf2_t test_vrgather_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vrgather_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vrgather_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgather_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t vs1, size_t vl) { +vint8m1_t test_vrgather_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vrgather_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vrgather_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgather_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t 
vs2, size_t vs1, size_t vl) { +vint8m2_t test_vrgather_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vrgather_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vrgather_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgather_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t vs1, size_t vl) { +vint8m4_t test_vrgather_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vrgather_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vrgather_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vrgather_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t vs1, size_t vl) { +vint8m8_t test_vrgather_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vrgather_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgather_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t vs1, size_t vl) { +vint16mf4_t test_vrgather_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vrgather_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgather_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t vs1, size_t vl) { +vint16mf2_t test_vrgather_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vrgather_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgather_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t vs1, size_t vl) { +vint16m1_t test_vrgather_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vrgather_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgather_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgather_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t vs1, size_t vl) { +vint16m2_t test_vrgather_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + size_t vs1, size_t vl) { return 
__riscv_vrgather_vx_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vrgather_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgather_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgather_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t vs1, size_t vl) { +vint16m4_t test_vrgather_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vrgather_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vrgather_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgather_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t vs1, size_t vl) { +vint16m8_t test_vrgather_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vrgather_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgather_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t vs1, size_t vl) { +vint32mf2_t test_vrgather_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vrgather_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgather_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t vs1, size_t vl) { +vint32m1_t test_vrgather_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vrgather_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgather_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t vs1, size_t vl) { +vint32m2_t test_vrgather_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vrgather_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vrgather_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgather_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t vs1, size_t vl) { +vint32m4_t test_vrgather_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t 
vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vrgather_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vrgather_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgather_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t vs1, size_t vl) { +vint32m8_t test_vrgather_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vrgather_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgather_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t vs1, size_t vl) { +vint64m1_t test_vrgather_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vrgather_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgather_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t vs1, size_t vl) { +vint64m2_t test_vrgather_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vrgather_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgather_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t vs1, size_t vl) { +vint64m4_t test_vrgather_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vrgather_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vrgather_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgather_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t vs1, size_t vl) { +vint64m8_t test_vrgather_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_i64m8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vrgather_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgather_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vs1, size_t vl) { +vuint8mf8_t test_vrgather_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vrgather_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, 
vuint8mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgather_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vs1, size_t vl) { +vuint8mf4_t test_vrgather_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vrgather_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgather_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vs1, size_t vl) { +vuint8mf2_t test_vrgather_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vrgather_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vrgather_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgather_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vs1, size_t vl) { +vuint8m1_t test_vrgather_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vrgather_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vrgather_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgather_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vs1, size_t vl) { +vuint8m2_t test_vrgather_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vrgather_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vrgather_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgather_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vs1, size_t vl) { +vuint8m4_t test_vrgather_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vrgather_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vrgather_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vrgather_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vs1, size_t vl) { +vuint8m8_t test_vrgather_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vs1, size_t vl) { return __riscv_vrgather_vx_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vrgather_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgather_vx_u16mf4_mu(vbool64_t vm, 
vuint16mf4_t vd, vuint16mf4_t vs2, size_t vs1, size_t vl) { +vuint16mf4_t test_vrgather_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vrgather_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgather_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vs1, size_t vl) { +vuint16mf2_t test_vrgather_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vrgather_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgather_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vs1, size_t vl) { +vuint16m1_t test_vrgather_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vrgather_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgather_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vs1, size_t vl) { +vuint16m2_t test_vrgather_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vrgather_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgather_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vs1, size_t vl) { +vuint16m4_t test_vrgather_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vrgather_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgather_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vs1, size_t vl) { +vuint16m8_t test_vrgather_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vrgather_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgather_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t 
vs2, size_t vs1, size_t vl) { +vuint32mf2_t test_vrgather_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vs1, + size_t vl) { return __riscv_vrgather_vx_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vrgather_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgather_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vs1, size_t vl) { +vuint32m1_t test_vrgather_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vrgather_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgather_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vs1, size_t vl) { +vuint32m2_t test_vrgather_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vrgather_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgather_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vs1, size_t vl) { +vuint32m4_t test_vrgather_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vrgather_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgather_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vs1, size_t vl) { +vuint32m8_t test_vrgather_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vrgather_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vrgather_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgather_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vs1, size_t vl) { +vuint64m1_t test_vrgather_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vrgather_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vrgather_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgather_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vs1, size_t vl) { +vuint64m2_t 
test_vrgather_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vrgather_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vrgather_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgather_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vs1, size_t vl) { +vuint64m4_t test_vrgather_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vrgather_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vrgather_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vs1, size_t vl) { +vuint64m8_t test_vrgather_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vs1, size_t vl) { return __riscv_vrgather_vx_u64m8_mu(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vrgatherei16.c b/auto-generated/policy_funcs/llvm-api-tests/vrgatherei16.c index dd182f15f..8e42b19fc 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vrgatherei16.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vrgatherei16.c @@ -1,919 +1,1318 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tu(vfloat16mf4_t vd, + vfloat16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f16mf4_tu(vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tu(vfloat16mf2_t vd, + vfloat16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f16mf2_tu(vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgatherei16_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat16m1_t test_vrgatherei16_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f16m1_tu(vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgatherei16_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat16m2_t test_vrgatherei16_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f16m2_tu(vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgatherei16_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vfloat16m4_t test_vrgatherei16_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f16m4_tu(vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgatherei16_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vfloat16m8_t
test_vrgatherei16_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f16m8_tu(vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f32mf2_tu(vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgatherei16_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vrgatherei16_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f32m1_tu(vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgatherei16_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat32m2_t test_vrgatherei16_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f32m2_tu(vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgatherei16_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat32m4_t test_vrgatherei16_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f32m4_tu(vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgatherei16_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vfloat32m8_t test_vrgatherei16_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f32m8_tu(vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgatherei16_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat64m1_t test_vrgatherei16_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f64m1_tu(vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgatherei16_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat64m2_t test_vrgatherei16_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f64m2_tu(vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgatherei16_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat64m4_t test_vrgatherei16_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f64m4_tu(vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgatherei16_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat64m8_t test_vrgatherei16_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f64m8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vrgatherei16_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { +vint8mf8_t test_vrgatherei16_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vrgatherei16_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { +vint8mf4_t test_vrgatherei16_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vrgatherei16_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vint8mf2_t test_vrgatherei16_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8m1_t 
test_vrgatherei16_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) { +vint8m1_t test_vrgatherei16_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vrgatherei16_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) { +vint8m2_t test_vrgatherei16_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vrgatherei16_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) { +vint8m4_t test_vrgatherei16_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vrgatherei16_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vrgatherei16_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vrgatherei16_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vrgatherei16_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vrgatherei16_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vrgatherei16_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vrgatherei16_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vrgatherei16_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vrgatherei16_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vrgatherei16_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vrgatherei16_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vrgatherei16_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vrgatherei16_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vint32m1_t test_vrgatherei16_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vint32m2_t test_vrgatherei16_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vint32m4_t test_vrgatherei16_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8_tu(vint32m8_t vd, 
vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vint32m8_t test_vrgatherei16_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { +vint64m1_t test_vrgatherei16_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint64m2_t test_vrgatherei16_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vrgatherei16_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { +vint64m4_t test_vrgatherei16_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { +vint64m8_t test_vrgatherei16_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i64m8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint8mf8_t test_vrgatherei16_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint8mf4_t test_vrgatherei16_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint8mf2_t test_vrgatherei16_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { +vuint8m1_t test_vrgatherei16_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { +vuint8m2_t test_vrgatherei16_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { +vuint8m4_t test_vrgatherei16_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vrgatherei16_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vrgatherei16_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1_tu(vuint16m1_t vd, 
vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vrgatherei16_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vrgatherei16_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vrgatherei16_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vrgatherei16_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgatherei16_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vrgatherei16_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vrgatherei16_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vrgatherei16_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vrgatherei16_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vrgatherei16_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vrgatherei16_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vrgatherei16_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint64m1_t test_vrgatherei16_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vrgatherei16_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint64m2_t test_vrgatherei16_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vrgatherei16_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint64m4_t test_vrgatherei16_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vrgatherei16_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) { +vuint64m8_t test_vrgatherei16_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u64m8_tu(vd, vs2, vs1, vl); } -vfloat16mf4_t 
test_vrgatherei16_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f16mf4_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f16mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgatherei16_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat16m1_t test_vrgatherei16_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f16m1_tum(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgatherei16_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat16m2_t test_vrgatherei16_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f16m2_tum(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgatherei16_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vfloat16m4_t test_vrgatherei16_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f16m4_tum(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgatherei16_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vfloat16m8_t test_vrgatherei16_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f16m8_tum(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f32mf2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgatherei16_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vrgatherei16_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f32m1_tum(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgatherei16_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat32m2_t test_vrgatherei16_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f32m2_tum(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgatherei16_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat32m4_t test_vrgatherei16_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f32m4_tum(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgatherei16_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vfloat32m8_t test_vrgatherei16_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f32m8_tum(vm, vd, vs2, vs1, vl); } -vfloat64m1_t 
test_vrgatherei16_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat64m1_t test_vrgatherei16_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f64m1_tum(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgatherei16_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat64m2_t test_vrgatherei16_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f64m2_tum(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgatherei16_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat64m4_t test_vrgatherei16_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f64m4_tum(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgatherei16_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat64m8_t test_vrgatherei16_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f64m8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgatherei16_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { +vint8mf8_t test_vrgatherei16_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgatherei16_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { +vint8mf4_t test_vrgatherei16_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgatherei16_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vint8mf2_t test_vrgatherei16_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgatherei16_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) { +vint8m1_t test_vrgatherei16_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vint8m1_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgatherei16_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) { +vint8m2_t test_vrgatherei16_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, + vint8m2_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgatherei16_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) { +vint8m4_t test_vrgatherei16_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, + vint8m4_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgatherei16_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vrgatherei16_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgatherei16_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t 
test_vrgatherei16_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgatherei16_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vrgatherei16_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgatherei16_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vrgatherei16_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgatherei16_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vrgatherei16_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgatherei16_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vrgatherei16_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vrgatherei16_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vint32m1_t test_vrgatherei16_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vint32m2_t test_vrgatherei16_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vint32m4_t test_vrgatherei16_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vint32m8_t test_vrgatherei16_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { +vint64m1_t test_vrgatherei16_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint64m2_t test_vrgatherei16_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } 
-vint64m4_t test_vrgatherei16_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { +vint64m4_t test_vrgatherei16_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { +vint64m8_t test_vrgatherei16_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint8mf8_t test_vrgatherei16_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint8mf4_t test_vrgatherei16_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint8mf2_t test_vrgatherei16_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { +vuint8m1_t test_vrgatherei16_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { +vuint8m2_t test_vrgatherei16_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { +vuint8m4_t test_vrgatherei16_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vrgatherei16_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vrgatherei16_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vrgatherei16_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t 
test_vrgatherei16_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vrgatherei16_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vrgatherei16_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgatherei16_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vrgatherei16_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vrgatherei16_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgatherei16_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vrgatherei16_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgatherei16_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vrgatherei16_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgatherei16_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint64m1_t test_vrgatherei16_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgatherei16_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint64m2_t test_vrgatherei16_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgatherei16_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint64m4_t test_vrgatherei16_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgatherei16_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) { +vuint64m8_t test_vrgatherei16_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint16m2_t vs1, + size_t vl) { 
return __riscv_vrgatherei16_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgatherei16_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat16m1_t test_vrgatherei16_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f16m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgatherei16_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat16m2_t test_vrgatherei16_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f16m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgatherei16_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vfloat16m4_t test_vrgatherei16_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f16m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgatherei16_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vfloat16m8_t test_vrgatherei16_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f16m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgatherei16_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vrgatherei16_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f32m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgatherei16_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat32m2_t test_vrgatherei16_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f32m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgatherei16_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat32m4_t test_vrgatherei16_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f32m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgatherei16_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vfloat32m8_t test_vrgatherei16_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vuint16m4_t vs1, + size_t vl) { 
return __riscv_vrgatherei16_vv_f32m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgatherei16_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat64m1_t test_vrgatherei16_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f64m1_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgatherei16_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat64m2_t test_vrgatherei16_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f64m2_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgatherei16_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat64m4_t test_vrgatherei16_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f64m4_tumu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgatherei16_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat64m8_t test_vrgatherei16_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f64m8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgatherei16_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { +vint8mf8_t test_vrgatherei16_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgatherei16_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { +vint8mf4_t test_vrgatherei16_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgatherei16_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vint8mf2_t test_vrgatherei16_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgatherei16_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) { +vint8m1_t test_vrgatherei16_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + vint8m1_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgatherei16_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) { +vint8m2_t test_vrgatherei16_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + vint8m2_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgatherei16_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) { +vint8m4_t test_vrgatherei16_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + vint8m4_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgatherei16_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vrgatherei16_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vrgatherei16_vv_i16mf2_tumu(vbool32_t 
vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vrgatherei16_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgatherei16_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vrgatherei16_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgatherei16_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vrgatherei16_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgatherei16_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vrgatherei16_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgatherei16_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vrgatherei16_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vrgatherei16_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vint32m1_t test_vrgatherei16_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vint32m2_t test_vrgatherei16_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vint32m4_t test_vrgatherei16_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vint32m8_t test_vrgatherei16_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { +vint64m1_t test_vrgatherei16_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint64m2_t test_vrgatherei16_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + 
vint64m2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgatherei16_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { +vint64m4_t test_vrgatherei16_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { +vint64m8_t test_vrgatherei16_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint8mf8_t test_vrgatherei16_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint8mf4_t test_vrgatherei16_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint8mf2_t test_vrgatherei16_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { +vuint8m1_t test_vrgatherei16_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { +vuint8m2_t test_vrgatherei16_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { +vuint8m4_t test_vrgatherei16_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vrgatherei16_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vrgatherei16_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vrgatherei16_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16m1_tumu(vm, vd, vs2, vs1, 
vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vrgatherei16_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vrgatherei16_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vrgatherei16_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgatherei16_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vrgatherei16_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vrgatherei16_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vrgatherei16_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgatherei16_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vrgatherei16_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgatherei16_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vrgatherei16_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgatherei16_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint64m1_t test_vrgatherei16_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgatherei16_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint64m2_t test_vrgatherei16_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgatherei16_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint64m4_t test_vrgatherei16_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgatherei16_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, 
vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) { +vuint64m8_t test_vrgatherei16_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vfloat16mf4_t test_vrgatherei16_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat16mf4_t test_vrgatherei16_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f16mf4_mu(vm, vd, vs2, vs1, vl); } -vfloat16mf2_t test_vrgatherei16_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat16mf2_t test_vrgatherei16_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f16mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m1_t test_vrgatherei16_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat16m1_t test_vrgatherei16_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f16m1_mu(vm, vd, vs2, vs1, vl); } -vfloat16m2_t test_vrgatherei16_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat16m2_t test_vrgatherei16_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f16m2_mu(vm, vd, vs2, vs1, vl); } -vfloat16m4_t test_vrgatherei16_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vfloat16m4_t test_vrgatherei16_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f16m4_mu(vm, vd, vs2, vs1, vl); } -vfloat16m8_t test_vrgatherei16_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vfloat16m8_t test_vrgatherei16_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f16m8_mu(vm, vd, vs2, vs1, vl); } -vfloat32mf2_t test_vrgatherei16_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat32mf2_t test_vrgatherei16_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_f32mf2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m1_t test_vrgatherei16_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat32m1_t test_vrgatherei16_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f32m1_mu(vm, vd, vs2, vs1, vl); } -vfloat32m2_t test_vrgatherei16_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat32m2_t test_vrgatherei16_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f32m2_mu(vm, vd, vs2, vs1, vl); } -vfloat32m4_t test_vrgatherei16_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat32m4_t test_vrgatherei16_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f32m4_mu(vm, vd, vs2, vs1, vl); } -vfloat32m8_t test_vrgatherei16_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vfloat32m8_t 
test_vrgatherei16_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f32m8_mu(vm, vd, vs2, vs1, vl); } -vfloat64m1_t test_vrgatherei16_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, vuint16mf4_t vs1, size_t vl) { +vfloat64m1_t test_vrgatherei16_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f64m1_mu(vm, vd, vs2, vs1, vl); } -vfloat64m2_t test_vrgatherei16_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, vuint16mf2_t vs1, size_t vl) { +vfloat64m2_t test_vrgatherei16_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f64m2_mu(vm, vd, vs2, vs1, vl); } -vfloat64m4_t test_vrgatherei16_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, vuint16m1_t vs1, size_t vl) { +vfloat64m4_t test_vrgatherei16_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f64m4_mu(vm, vd, vs2, vs1, vl); } -vfloat64m8_t test_vrgatherei16_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, vuint16m2_t vs1, size_t vl) { +vfloat64m8_t test_vrgatherei16_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_f64m8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vrgatherei16_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { +vint8mf8_t test_vrgatherei16_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vrgatherei16_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { +vint8mf4_t test_vrgatherei16_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vrgatherei16_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vint8mf2_t test_vrgatherei16_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vrgatherei16_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint16m2_t vs1, size_t vl) { +vint8m1_t test_vrgatherei16_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vrgatherei16_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint16m4_t vs1, size_t vl) { +vint8m2_t test_vrgatherei16_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vrgatherei16_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint16m8_t vs1, size_t vl) { +vint8m4_t test_vrgatherei16_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vrgatherei16_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vrgatherei16_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vrgatherei16_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t 
test_vrgatherei16_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vrgatherei16_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vrgatherei16_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vrgatherei16_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vrgatherei16_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vrgatherei16_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vrgatherei16_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vrgatherei16_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vrgatherei16_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vrgatherei16_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vrgatherei16_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vrgatherei16_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vrgatherei16_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vint32m1_t test_vrgatherei16_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vrgatherei16_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vint32m2_t test_vrgatherei16_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vrgatherei16_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vint32m4_t test_vrgatherei16_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vrgatherei16_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vint32m8_t test_vrgatherei16_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vrgatherei16_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { +vint64m1_t test_vrgatherei16_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vrgatherei16_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint64m2_t test_vrgatherei16_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, 
vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vrgatherei16_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint16m1_t vs1, size_t vl) { +vint64m4_t test_vrgatherei16_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vrgatherei16_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint16m2_t vs1, size_t vl) { +vint64m8_t test_vrgatherei16_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vrgatherei16_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint8mf8_t test_vrgatherei16_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vrgatherei16_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint8mf4_t test_vrgatherei16_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vrgatherei16_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint8mf2_t test_vrgatherei16_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vrgatherei16_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint16m2_t vs1, size_t vl) { +vuint8m1_t test_vrgatherei16_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vrgatherei16_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint16m4_t vs1, size_t vl) { +vuint8m2_t test_vrgatherei16_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vrgatherei16_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint16m8_t vs1, size_t vl) { +vuint8m4_t test_vrgatherei16_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vrgatherei16_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vrgatherei16_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vrgatherei16_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vrgatherei16_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vrgatherei16_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vrgatherei16_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vrgatherei16_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, 
vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vrgatherei16_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vrgatherei16_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vrgatherei16_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vrgatherei16_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vrgatherei16_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vrgatherei16_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vrgatherei16_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vrgatherei16_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vrgatherei16_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vrgatherei16_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vrgatherei16_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vrgatherei16_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vrgatherei16_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vrgatherei16_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vrgatherei16_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vrgatherei16_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint64m1_t test_vrgatherei16_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vrgatherei16_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint64m2_t test_vrgatherei16_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vrgatherei16_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint64m4_t test_vrgatherei16_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vrgatherei16_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vrgatherei16_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint16m2_t vs1, size_t vl) { +vuint64m8_t test_vrgatherei16_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, 
vuint16m2_t vs1,
+                                          size_t vl) {
   return __riscv_vrgatherei16_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vrsub.c b/auto-generated/policy_funcs/llvm-api-tests/vrsub.c
index fb5476516..1fc5ea4e5 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vrsub.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vrsub.c
@@ -5,706 +5,891 @@

 #include <riscv_vector.h>

-vint8mf8_t test_vrsub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) {
+vint8mf8_t test_vrsub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1,
+                                  size_t vl) {
   return __riscv_vrsub_vx_i8mf8_tu(vd, vs2, rs1, vl);
 }

-vint8mf4_t test_vrsub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) {
+vint8mf4_t test_vrsub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1,
+                                  size_t vl) {
   return __riscv_vrsub_vx_i8mf4_tu(vd, vs2, rs1, vl);
 }

-vint8mf2_t test_vrsub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) {
+vint8mf2_t test_vrsub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1,
+                                  size_t vl) {
   return __riscv_vrsub_vx_i8mf2_tu(vd, vs2, rs1, vl);
 }

-vint8m1_t test_vrsub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+vint8m1_t test_vrsub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1,
+                                size_t vl) {
   return __riscv_vrsub_vx_i8m1_tu(vd, vs2, rs1, vl);
 }

-vint8m2_t test_vrsub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) {
+vint8m2_t test_vrsub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1,
+                                size_t vl) {
   return __riscv_vrsub_vx_i8m2_tu(vd, vs2, rs1, vl);
 }

-vint8m4_t test_vrsub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) {
+vint8m4_t test_vrsub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1,
+                                size_t vl) {
   return __riscv_vrsub_vx_i8m4_tu(vd, vs2, rs1, vl);
 }

-vint8m8_t test_vrsub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) {
+vint8m8_t test_vrsub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1,
+                                size_t vl) {
   return __riscv_vrsub_vx_i8m8_tu(vd, vs2, rs1, vl);
 }

-vint16mf4_t test_vrsub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) {
+vint16mf4_t test_vrsub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2,
+                                    int16_t rs1, size_t vl) {
   return __riscv_vrsub_vx_i16mf4_tu(vd, vs2, rs1, vl);
 }

-vint16mf2_t test_vrsub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) {
+vint16mf2_t test_vrsub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2,
+                                    int16_t rs1, size_t vl) {
   return __riscv_vrsub_vx_i16mf2_tu(vd, vs2, rs1, vl);
 }

-vint16m1_t test_vrsub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) {
+vint16m1_t test_vrsub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1,
+                                  size_t vl) {
   return __riscv_vrsub_vx_i16m1_tu(vd, vs2, rs1, vl);
 }

-vint16m2_t test_vrsub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) {
+vint16m2_t test_vrsub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1,
+                                  size_t vl) {
   return __riscv_vrsub_vx_i16m2_tu(vd, vs2, rs1, vl);
 }

-vint16m4_t test_vrsub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) {
+vint16m4_t test_vrsub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1,
+                                  size_t vl) {
   return __riscv_vrsub_vx_i16m4_tu(vd, vs2, rs1, vl);
 }

-vint16m8_t test_vrsub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) {
+vint16m8_t test_vrsub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1,
+                                  size_t vl) {
   return __riscv_vrsub_vx_i16m8_tu(vd, vs2, rs1, vl);
 }

-vint32mf2_t test_vrsub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2,
int32_t rs1, size_t vl) { +vint32mf2_t test_vrsub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vrsub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vrsub_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vrsub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vrsub_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vrsub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vrsub_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vrsub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vrsub_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vrsub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vrsub_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vrsub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vrsub_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vrsub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vrsub_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vrsub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vrsub_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vrsub_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vrsub_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vrsub_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vrsub_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vrsub_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vrsub_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vrsub_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vrsub_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vrsub_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vrsub_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vrsub_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vrsub_vx_u8m4_tu(vd, vs2, rs1, vl); } 
-vuint8m8_t test_vrsub_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vrsub_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vrsub_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vrsub_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vrsub_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vrsub_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vrsub_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vrsub_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vrsub_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vrsub_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vrsub_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vrsub_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vrsub_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vrsub_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vrsub_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vrsub_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vrsub_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vrsub_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vrsub_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vrsub_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vrsub_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vrsub_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vrsub_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t 
vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vrsub_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vrsub_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vrsub_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vrsub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vrsub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrsub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vrsub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrsub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vrsub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrsub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vrsub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrsub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vrsub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrsub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vrsub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrsub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vrsub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vrsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vrsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vrsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vrsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t 
test_vrsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vrsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vrsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vrsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vrsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vrsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vrsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vrsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrsub_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vrsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrsub_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vrsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrsub_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vrsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrsub_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vrsub_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vrsub_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, 
uint8_t rs1, size_t vl) { +vuint8mf2_t test_vrsub_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vrsub_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vrsub_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vrsub_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrsub_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vrsub_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vrsub_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vrsub_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vrsub_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vrsub_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrsub_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vrsub_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrsub_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vrsub_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vrsub_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vrsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vrsub_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vrsub_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrsub_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vrsub_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m1_tum(vm, vd, 
vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vrsub_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vrsub_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrsub_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vrsub_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrsub_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vrsub_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrsub_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vrsub_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrsub_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vrsub_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrsub_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vrsub_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vrsub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vrsub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrsub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vrsub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrsub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vrsub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrsub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vrsub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrsub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vrsub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrsub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vrsub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) 
{ return __riscv_vrsub_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrsub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vrsub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vrsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vrsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vrsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vrsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vrsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vrsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vrsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vrsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vrsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vrsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vrsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t 
test_vrsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrsub_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vrsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrsub_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vrsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrsub_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vrsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrsub_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vrsub_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vrsub_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vrsub_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vrsub_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vrsub_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vrsub_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrsub_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vrsub_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vrsub_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vrsub_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vrsub_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vrsub_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t 
test_vrsub_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vrsub_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrsub_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vrsub_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vrsub_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vrsub_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vrsub_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vrsub_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrsub_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vrsub_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vrsub_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vrsub_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrsub_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vrsub_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrsub_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vrsub_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrsub_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vrsub_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrsub_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vrsub_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrsub_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t 
test_vrsub_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vrsub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vrsub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vrsub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vrsub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vrsub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vrsub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vrsub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vrsub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vrsub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vrsub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vrsub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vrsub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vrsub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vrsub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vrsub_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vrsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vrsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vrsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vrsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vrsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vrsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vrsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vrsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vrsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vrsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vrsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vrsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, 
vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vrsub_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vrsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vrsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vrsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vrsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vrsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vrsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vrsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vrsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vrsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vrsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vrsub_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vrsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vrsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrsub_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vrsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vrsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrsub_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vrsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vrsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrsub_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vrsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vrsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vrsub_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vrsub_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vrsub_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vrsub_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vrsub_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vrsub_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vrsub_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vrsub_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vrsub_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, 
vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vrsub_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vrsub_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vrsub_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vrsub_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vrsub_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vrsub_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vrsub_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vrsub_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vrsub_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vrsub_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vrsub_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vrsub_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vrsub_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vrsub_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vrsub_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vrsub_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vrsub_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vrsub_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vrsub_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vrsub_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vrsub_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vrsub_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vrsub_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vrsub_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vrsub_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vrsub_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vrsub_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vrsub_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vrsub_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vrsub_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t 
vl) { +vuint32m4_t test_vrsub_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vrsub_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vrsub_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vrsub_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vrsub_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vrsub_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vrsub_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vrsub_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vrsub_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vrsub_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vrsub_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vrsub_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vrsub_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsadd.c b/auto-generated/policy_funcs/llvm-api-tests/vsadd.c index 70424f5a2..2633927b0 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsadd.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsadd.c @@ -5,706 +5,891 @@ #include <riscv_vector.h> -vint8mf8_t test_vsadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsadd_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vsadd_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vsadd_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vsadd_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsadd_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vsadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsadd_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vsadd_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vsadd_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vsadd_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsadd_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsadd_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vsadd_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vsadd_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsadd_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vsadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vsadd_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vsadd_vv_i8m1_tu(vd, vs2, vs1, vl); }
-vint8m1_t test_vsadd_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vsadd_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsadd_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vsadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vsadd_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vsadd_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vsadd_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsadd_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vsadd_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vsadd_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vsadd_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsadd_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vsadd_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vsadd_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vsadd_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsadd_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsadd_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vsadd_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vsadd_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsadd_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vsadd_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vsadd_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vsadd_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vsadd_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vsadd_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vsadd_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vsadd_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vsadd_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vsadd_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { 
return __riscv_vsadd_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vsadd_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vsadd_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vsadd_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vsadd_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vsadd_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vsadd_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vsadd_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vsadd_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsadd_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vsadd_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vsadd_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vsadd_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vsadd_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vsadd_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsadd_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vsadd_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vsadd_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vsadd_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsadd_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vsadd_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vsadd_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vsadd_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsadd_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vsadd_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vsadd_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vsadd_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsadd_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t 
vs1, size_t vl) { +vint64m1_t test_vsadd_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vsadd_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vsadd_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vsadd_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vsadd_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vsadd_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vsadd_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vsadd_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vsadd_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vsadd_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vsadd_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vsadd_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vsadd_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vsadd_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vsadd_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vsadd_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vsadd_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vsadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsadd_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vsadd_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vsadd_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsadd_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vsadd_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vsadd_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsadd_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vsadd_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vsadd_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t 
test_vsadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vsadd_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vsadd_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vsadd_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vsadd_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vsadd_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vsadd_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vsadd_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vsadd_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vsadd_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vsadd_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vsadd_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vsadd_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vsadd_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vsadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vsadd_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vsadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vsadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vsadd_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } 
-vint16m1_t test_vsadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vsadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vsadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vsadd_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vsadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vsadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vsadd_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vsadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vsadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vsadd_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vsadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vsadd_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vsadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vsadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vsadd_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vsadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vsadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vsadd_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vsadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, 
size_t vl) { return __riscv_vsadd_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vsadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vsadd_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vsadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vsadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vsadd_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vsadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vsadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vsadd_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vsadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsadd_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vsadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vsadd_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vsadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsadd_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vsadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vsadd_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vsadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsadd_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vsadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vsadd_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vsadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsadd_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsadd_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsadd_vv_i8mf8_tumu(vbool64_t 
vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vsadd_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vsadd_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsadd_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vsadd_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vsadd_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsadd_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vsadd_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vsadd_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vsadd_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vsadd_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vsadd_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vsadd_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vsadd_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vsadd_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vsadd_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vsadd_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vsadd_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vsadd_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vsadd_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vsadd_vx_i8m8_tumu(vbool1_t vm, 
vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vsadd_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vsadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vsadd_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vsadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vsadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vsadd_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vsadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vsadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vsadd_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vsadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vsadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vsadd_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vsadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vsadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vsadd_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vsadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t 
test_vsadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vsadd_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vsadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vsadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vsadd_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vsadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vsadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vsadd_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vsadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vsadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vsadd_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vsadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vsadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vsadd_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vsadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vsadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vsadd_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vsadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsadd_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vsadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, 
vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vsadd_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vsadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsadd_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vsadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vsadd_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vsadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsadd_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vsadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vsadd_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vsadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsadd_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsadd_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vsadd_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vsadd_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsadd_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vsadd_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vsadd_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsadd_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vsadd_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vsadd_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vsadd_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vsadd_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t 
test_vsadd_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vsadd_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vsadd_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vsadd_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vsadd_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vsadd_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vsadd_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vsadd_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vsadd_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vsadd_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsadd_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vsadd_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vsadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vsadd_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vsadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vsadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vsadd_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vsadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t 
test_vsadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vsadd_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vsadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vsadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vsadd_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vsadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vsadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vsadd_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vsadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsadd_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vsadd_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vsadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vsadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vsadd_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vsadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vsadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vsadd_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vsadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vsadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vsadd_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t 
rs1, size_t vl) { +vint32m4_t test_vsadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vsadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vsadd_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vsadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsadd_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vsadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vsadd_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vsadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsadd_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vsadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vsadd_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vsadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsadd_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vsadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vsadd_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vsadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsadd_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vsadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vsadd_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vsadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsadd_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsaddu.c b/auto-generated/policy_funcs/llvm-api-tests/vsaddu.c index 8caa28d1e..15da1dc08 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsaddu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsaddu.c @@ -5,706 +5,957 @@ #include <riscv_vector.h> -vuint8mf8_t test_vsaddu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsaddu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, 
uint8_t rs1, size_t vl) { +vuint8mf8_t test_vsaddu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsaddu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vsaddu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsaddu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vsaddu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsaddu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vsaddu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsaddu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vsaddu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsaddu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vsaddu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsaddu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vsaddu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsaddu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vsaddu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t 
test_vsaddu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsaddu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vsaddu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vsaddu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsaddu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsaddu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vsaddu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsaddu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsaddu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vsaddu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsaddu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsaddu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vsaddu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsaddu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsaddu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vsaddu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsaddu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsaddu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vsaddu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsaddu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsaddu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t 
vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vsaddu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsaddu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsaddu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vsaddu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsaddu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsaddu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vsaddu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsaddu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsaddu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vsaddu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsaddu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsaddu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vsaddu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vsaddu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsaddu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsaddu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vsaddu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vsaddu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsaddu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsaddu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vsaddu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vsaddu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsaddu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsaddu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_tu(vuint64m8_t vd, 
vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vsaddu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vsaddu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vsaddu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsaddu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vsaddu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsaddu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vsaddu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsaddu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vsaddu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsaddu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vsaddu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsaddu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vsaddu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsaddu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vsaddu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return 
__riscv_vsaddu_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsaddu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vsaddu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsaddu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vsaddu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsaddu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vsaddu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsaddu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vsaddu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsaddu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vsaddu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsaddu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vsaddu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t 
test_vsaddu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsaddu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vsaddu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsaddu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vsaddu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsaddu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vsaddu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsaddu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vsaddu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsaddu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vsaddu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsaddu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vsaddu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, 
vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsaddu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vsaddu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsaddu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsaddu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vsaddu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsaddu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsaddu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vsaddu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsaddu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsaddu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vsaddu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsaddu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsaddu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsaddu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vsaddu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsaddu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vsaddu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t 
test_vsaddu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vsaddu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsaddu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vsaddu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsaddu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vsaddu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsaddu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vsaddu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsaddu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vsaddu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsaddu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vsaddu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsaddu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return 
__riscv_vsaddu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vsaddu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsaddu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vsaddu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsaddu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vsaddu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsaddu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vsaddu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsaddu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vsaddu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsaddu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vsaddu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsaddu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return 
__riscv_vsaddu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vsaddu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsaddu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vsaddu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsaddu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vsaddu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsaddu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vsaddu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsaddu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vsaddu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsaddu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vsaddu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsaddu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u64m4_tumu(vm, 
vd, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vsaddu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsaddu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vsaddu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsaddu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsaddu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsaddu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vsaddu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsaddu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsaddu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsaddu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vsaddu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsaddu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsaddu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsaddu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vsaddu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsaddu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsaddu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsaddu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vsaddu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsaddu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsaddu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsaddu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t 
test_vsaddu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsaddu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsaddu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsaddu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vsaddu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsaddu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsaddu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsaddu_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsaddu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vsaddu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsaddu_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsaddu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsaddu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsaddu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vsaddu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsaddu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsaddu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsaddu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vsaddu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsaddu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsaddu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsaddu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vsaddu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsaddu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsaddu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsaddu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vsaddu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return 
__riscv_vsaddu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsaddu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsaddu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsaddu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vsaddu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsaddu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsaddu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsaddu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vsaddu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsaddu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsaddu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsaddu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsaddu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vsaddu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsaddu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsaddu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsaddu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsaddu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vsaddu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsaddu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsaddu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsaddu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vsaddu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsaddu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsaddu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsaddu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vsaddu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsaddu_vv_u32m8_mu(vbool4_t 
vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsaddu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsaddu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vsaddu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsaddu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsaddu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsaddu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsaddu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vsaddu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsaddu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsaddu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsaddu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsaddu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vsaddu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsaddu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsaddu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsaddu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsaddu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vsaddu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsaddu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsaddu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsaddu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vsaddu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsaddu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vsaddu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsaddu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsbc.c b/auto-generated/policy_funcs/llvm-api-tests/vsbc.c index 18048689e..a38608763 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsbc.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsbc.c @@ -5,354 +5,445 @@ #include <riscv_vector.h> -vint8mf8_t test_vsbc_vvm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, vbool64_t v0, size_t vl) { +vint8mf8_t test_vsbc_vvm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + vbool64_t v0, size_t vl) { return __riscv_vsbc_vvm_i8mf8_tu(vd, vs2, vs1, v0, vl); } -vint8mf8_t test_vsbc_vxm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, vbool64_t v0, size_t vl) { +vint8mf8_t test_vsbc_vxm_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2,
int8_t rs1, + vbool64_t v0, size_t vl) { return __riscv_vsbc_vxm_i8mf8_tu(vd, vs2, rs1, v0, vl); } -vint8mf4_t test_vsbc_vvm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, vbool32_t v0, size_t vl) { +vint8mf4_t test_vsbc_vvm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + vbool32_t v0, size_t vl) { return __riscv_vsbc_vvm_i8mf4_tu(vd, vs2, vs1, v0, vl); } -vint8mf4_t test_vsbc_vxm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, vbool32_t v0, size_t vl) { +vint8mf4_t test_vsbc_vxm_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + vbool32_t v0, size_t vl) { return __riscv_vsbc_vxm_i8mf4_tu(vd, vs2, rs1, v0, vl); } -vint8mf2_t test_vsbc_vvm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, vbool16_t v0, size_t vl) { +vint8mf2_t test_vsbc_vvm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + vbool16_t v0, size_t vl) { return __riscv_vsbc_vvm_i8mf2_tu(vd, vs2, vs1, v0, vl); } -vint8mf2_t test_vsbc_vxm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, vbool16_t v0, size_t vl) { +vint8mf2_t test_vsbc_vxm_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vsbc_vxm_i8mf2_tu(vd, vs2, rs1, v0, vl); } -vint8m1_t test_vsbc_vvm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, vbool8_t v0, size_t vl) { +vint8m1_t test_vsbc_vvm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + vbool8_t v0, size_t vl) { return __riscv_vsbc_vvm_i8m1_tu(vd, vs2, vs1, v0, vl); } -vint8m1_t test_vsbc_vxm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, vbool8_t v0, size_t vl) { +vint8m1_t test_vsbc_vxm_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vsbc_vxm_i8m1_tu(vd, vs2, rs1, v0, vl); } -vint8m2_t test_vsbc_vvm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, vbool4_t v0, size_t vl) { +vint8m2_t test_vsbc_vvm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + vbool4_t v0, size_t vl) { return __riscv_vsbc_vvm_i8m2_tu(vd, vs2, vs1, v0, vl); } -vint8m2_t test_vsbc_vxm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, vbool4_t v0, size_t vl) { +vint8m2_t test_vsbc_vxm_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + vbool4_t v0, size_t vl) { return __riscv_vsbc_vxm_i8m2_tu(vd, vs2, rs1, v0, vl); } -vint8m4_t test_vsbc_vvm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, vbool2_t v0, size_t vl) { +vint8m4_t test_vsbc_vvm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + vbool2_t v0, size_t vl) { return __riscv_vsbc_vvm_i8m4_tu(vd, vs2, vs1, v0, vl); } -vint8m4_t test_vsbc_vxm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, vbool2_t v0, size_t vl) { +vint8m4_t test_vsbc_vxm_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + vbool2_t v0, size_t vl) { return __riscv_vsbc_vxm_i8m4_tu(vd, vs2, rs1, v0, vl); } -vint8m8_t test_vsbc_vvm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, vbool1_t v0, size_t vl) { +vint8m8_t test_vsbc_vvm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + vbool1_t v0, size_t vl) { return __riscv_vsbc_vvm_i8m8_tu(vd, vs2, vs1, v0, vl); } -vint8m8_t test_vsbc_vxm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, vbool1_t v0, size_t vl) { +vint8m8_t test_vsbc_vxm_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + vbool1_t v0, size_t vl) { return __riscv_vsbc_vxm_i8m8_tu(vd, vs2, rs1, v0, vl); } -vint16mf4_t test_vsbc_vvm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, vbool64_t v0, size_t vl) { +vint16mf4_t test_vsbc_vvm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, vbool64_t v0, size_t vl) { return 
__riscv_vsbc_vvm_i16mf4_tu(vd, vs2, vs1, v0, vl); } -vint16mf4_t test_vsbc_vxm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, vbool64_t v0, size_t vl) { +vint16mf4_t test_vsbc_vxm_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + int16_t rs1, vbool64_t v0, size_t vl) { return __riscv_vsbc_vxm_i16mf4_tu(vd, vs2, rs1, v0, vl); } -vint16mf2_t test_vsbc_vvm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, vbool32_t v0, size_t vl) { +vint16mf2_t test_vsbc_vvm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, vbool32_t v0, size_t vl) { return __riscv_vsbc_vvm_i16mf2_tu(vd, vs2, vs1, v0, vl); } -vint16mf2_t test_vsbc_vxm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, vbool32_t v0, size_t vl) { +vint16mf2_t test_vsbc_vxm_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + int16_t rs1, vbool32_t v0, size_t vl) { return __riscv_vsbc_vxm_i16mf2_tu(vd, vs2, rs1, v0, vl); } -vint16m1_t test_vsbc_vvm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, vbool16_t v0, size_t vl) { +vint16m1_t test_vsbc_vvm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + vbool16_t v0, size_t vl) { return __riscv_vsbc_vvm_i16m1_tu(vd, vs2, vs1, v0, vl); } -vint16m1_t test_vsbc_vxm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, vbool16_t v0, size_t vl) { +vint16m1_t test_vsbc_vxm_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vsbc_vxm_i16m1_tu(vd, vs2, rs1, v0, vl); } -vint16m2_t test_vsbc_vvm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, vbool8_t v0, size_t vl) { +vint16m2_t test_vsbc_vvm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + vbool8_t v0, size_t vl) { return __riscv_vsbc_vvm_i16m2_tu(vd, vs2, vs1, v0, vl); } -vint16m2_t test_vsbc_vxm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, vbool8_t v0, size_t vl) { +vint16m2_t test_vsbc_vxm_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vsbc_vxm_i16m2_tu(vd, vs2, rs1, v0, vl); } -vint16m4_t test_vsbc_vvm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, vbool4_t v0, size_t vl) { +vint16m4_t test_vsbc_vvm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + vbool4_t v0, size_t vl) { return __riscv_vsbc_vvm_i16m4_tu(vd, vs2, vs1, v0, vl); } -vint16m4_t test_vsbc_vxm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, vbool4_t v0, size_t vl) { +vint16m4_t test_vsbc_vxm_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + vbool4_t v0, size_t vl) { return __riscv_vsbc_vxm_i16m4_tu(vd, vs2, rs1, v0, vl); } -vint16m8_t test_vsbc_vvm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, vbool2_t v0, size_t vl) { +vint16m8_t test_vsbc_vvm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + vbool2_t v0, size_t vl) { return __riscv_vsbc_vvm_i16m8_tu(vd, vs2, vs1, v0, vl); } -vint16m8_t test_vsbc_vxm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, vbool2_t v0, size_t vl) { +vint16m8_t test_vsbc_vxm_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + vbool2_t v0, size_t vl) { return __riscv_vsbc_vxm_i16m8_tu(vd, vs2, rs1, v0, vl); } -vint32mf2_t test_vsbc_vvm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, vbool64_t v0, size_t vl) { +vint32mf2_t test_vsbc_vvm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, vbool64_t v0, size_t vl) { return __riscv_vsbc_vvm_i32mf2_tu(vd, vs2, vs1, v0, vl); } -vint32mf2_t test_vsbc_vxm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, vbool64_t v0, size_t vl) { +vint32mf2_t test_vsbc_vxm_i32mf2_tu(vint32mf2_t vd, vint32mf2_t 
vs2, + int32_t rs1, vbool64_t v0, size_t vl) { return __riscv_vsbc_vxm_i32mf2_tu(vd, vs2, rs1, v0, vl); } -vint32m1_t test_vsbc_vvm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, vbool32_t v0, size_t vl) { +vint32m1_t test_vsbc_vvm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + vbool32_t v0, size_t vl) { return __riscv_vsbc_vvm_i32m1_tu(vd, vs2, vs1, v0, vl); } -vint32m1_t test_vsbc_vxm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, vbool32_t v0, size_t vl) { +vint32m1_t test_vsbc_vxm_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + vbool32_t v0, size_t vl) { return __riscv_vsbc_vxm_i32m1_tu(vd, vs2, rs1, v0, vl); } -vint32m2_t test_vsbc_vvm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, vbool16_t v0, size_t vl) { +vint32m2_t test_vsbc_vvm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + vbool16_t v0, size_t vl) { return __riscv_vsbc_vvm_i32m2_tu(vd, vs2, vs1, v0, vl); } -vint32m2_t test_vsbc_vxm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, vbool16_t v0, size_t vl) { +vint32m2_t test_vsbc_vxm_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vsbc_vxm_i32m2_tu(vd, vs2, rs1, v0, vl); } -vint32m4_t test_vsbc_vvm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, vbool8_t v0, size_t vl) { +vint32m4_t test_vsbc_vvm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + vbool8_t v0, size_t vl) { return __riscv_vsbc_vvm_i32m4_tu(vd, vs2, vs1, v0, vl); } -vint32m4_t test_vsbc_vxm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, vbool8_t v0, size_t vl) { +vint32m4_t test_vsbc_vxm_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vsbc_vxm_i32m4_tu(vd, vs2, rs1, v0, vl); } -vint32m8_t test_vsbc_vvm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, vbool4_t v0, size_t vl) { +vint32m8_t test_vsbc_vvm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + vbool4_t v0, size_t vl) { return __riscv_vsbc_vvm_i32m8_tu(vd, vs2, vs1, v0, vl); } -vint32m8_t test_vsbc_vxm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, vbool4_t v0, size_t vl) { +vint32m8_t test_vsbc_vxm_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + vbool4_t v0, size_t vl) { return __riscv_vsbc_vxm_i32m8_tu(vd, vs2, rs1, v0, vl); } -vint64m1_t test_vsbc_vvm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, vbool64_t v0, size_t vl) { +vint64m1_t test_vsbc_vvm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + vbool64_t v0, size_t vl) { return __riscv_vsbc_vvm_i64m1_tu(vd, vs2, vs1, v0, vl); } -vint64m1_t test_vsbc_vxm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, vbool64_t v0, size_t vl) { +vint64m1_t test_vsbc_vxm_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + vbool64_t v0, size_t vl) { return __riscv_vsbc_vxm_i64m1_tu(vd, vs2, rs1, v0, vl); } -vint64m2_t test_vsbc_vvm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, vbool32_t v0, size_t vl) { +vint64m2_t test_vsbc_vvm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + vbool32_t v0, size_t vl) { return __riscv_vsbc_vvm_i64m2_tu(vd, vs2, vs1, v0, vl); } -vint64m2_t test_vsbc_vxm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, vbool32_t v0, size_t vl) { +vint64m2_t test_vsbc_vxm_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + vbool32_t v0, size_t vl) { return __riscv_vsbc_vxm_i64m2_tu(vd, vs2, rs1, v0, vl); } -vint64m4_t test_vsbc_vvm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, vbool16_t v0, size_t vl) { +vint64m4_t test_vsbc_vvm_i64m4_tu(vint64m4_t 
vd, vint64m4_t vs2, vint64m4_t vs1, + vbool16_t v0, size_t vl) { return __riscv_vsbc_vvm_i64m4_tu(vd, vs2, vs1, v0, vl); } -vint64m4_t test_vsbc_vxm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, vbool16_t v0, size_t vl) { +vint64m4_t test_vsbc_vxm_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vsbc_vxm_i64m4_tu(vd, vs2, rs1, v0, vl); } -vint64m8_t test_vsbc_vvm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, vbool8_t v0, size_t vl) { +vint64m8_t test_vsbc_vvm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + vbool8_t v0, size_t vl) { return __riscv_vsbc_vvm_i64m8_tu(vd, vs2, vs1, v0, vl); } -vint64m8_t test_vsbc_vxm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, vbool8_t v0, size_t vl) { +vint64m8_t test_vsbc_vxm_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vsbc_vxm_i64m8_tu(vd, vs2, rs1, v0, vl); } -vuint8mf8_t test_vsbc_vvm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, vbool64_t v0, size_t vl) { +vuint8mf8_t test_vsbc_vvm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, vbool64_t v0, size_t vl) { return __riscv_vsbc_vvm_u8mf8_tu(vd, vs2, vs1, v0, vl); } -vuint8mf8_t test_vsbc_vxm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, vbool64_t v0, size_t vl) { +vuint8mf8_t test_vsbc_vxm_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + vbool64_t v0, size_t vl) { return __riscv_vsbc_vxm_u8mf8_tu(vd, vs2, rs1, v0, vl); } -vuint8mf4_t test_vsbc_vvm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, vbool32_t v0, size_t vl) { +vuint8mf4_t test_vsbc_vvm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, vbool32_t v0, size_t vl) { return __riscv_vsbc_vvm_u8mf4_tu(vd, vs2, vs1, v0, vl); } -vuint8mf4_t test_vsbc_vxm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, vbool32_t v0, size_t vl) { +vuint8mf4_t test_vsbc_vxm_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + vbool32_t v0, size_t vl) { return __riscv_vsbc_vxm_u8mf4_tu(vd, vs2, rs1, v0, vl); } -vuint8mf2_t test_vsbc_vvm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, vbool16_t v0, size_t vl) { +vuint8mf2_t test_vsbc_vvm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, vbool16_t v0, size_t vl) { return __riscv_vsbc_vvm_u8mf2_tu(vd, vs2, vs1, v0, vl); } -vuint8mf2_t test_vsbc_vxm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, vbool16_t v0, size_t vl) { +vuint8mf2_t test_vsbc_vxm_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + vbool16_t v0, size_t vl) { return __riscv_vsbc_vxm_u8mf2_tu(vd, vs2, rs1, v0, vl); } -vuint8m1_t test_vsbc_vvm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, vbool8_t v0, size_t vl) { +vuint8m1_t test_vsbc_vvm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + vbool8_t v0, size_t vl) { return __riscv_vsbc_vvm_u8m1_tu(vd, vs2, vs1, v0, vl); } -vuint8m1_t test_vsbc_vxm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, vbool8_t v0, size_t vl) { +vuint8m1_t test_vsbc_vxm_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + vbool8_t v0, size_t vl) { return __riscv_vsbc_vxm_u8m1_tu(vd, vs2, rs1, v0, vl); } -vuint8m2_t test_vsbc_vvm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, vbool4_t v0, size_t vl) { +vuint8m2_t test_vsbc_vvm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + vbool4_t v0, size_t vl) { return __riscv_vsbc_vvm_u8m2_tu(vd, vs2, vs1, v0, vl); } -vuint8m2_t test_vsbc_vxm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, vbool4_t v0, size_t vl) { 
+vuint8m2_t test_vsbc_vxm_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + vbool4_t v0, size_t vl) { return __riscv_vsbc_vxm_u8m2_tu(vd, vs2, rs1, v0, vl); } -vuint8m4_t test_vsbc_vvm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, vbool2_t v0, size_t vl) { +vuint8m4_t test_vsbc_vvm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + vbool2_t v0, size_t vl) { return __riscv_vsbc_vvm_u8m4_tu(vd, vs2, vs1, v0, vl); } -vuint8m4_t test_vsbc_vxm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, vbool2_t v0, size_t vl) { +vuint8m4_t test_vsbc_vxm_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + vbool2_t v0, size_t vl) { return __riscv_vsbc_vxm_u8m4_tu(vd, vs2, rs1, v0, vl); } -vuint8m8_t test_vsbc_vvm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, vbool1_t v0, size_t vl) { +vuint8m8_t test_vsbc_vvm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + vbool1_t v0, size_t vl) { return __riscv_vsbc_vvm_u8m8_tu(vd, vs2, vs1, v0, vl); } -vuint8m8_t test_vsbc_vxm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, vbool1_t v0, size_t vl) { +vuint8m8_t test_vsbc_vxm_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + vbool1_t v0, size_t vl) { return __riscv_vsbc_vxm_u8m8_tu(vd, vs2, rs1, v0, vl); } -vuint16mf4_t test_vsbc_vvm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, vbool64_t v0, size_t vl) { +vuint16mf4_t test_vsbc_vvm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, vbool64_t v0, + size_t vl) { return __riscv_vsbc_vvm_u16mf4_tu(vd, vs2, vs1, v0, vl); } -vuint16mf4_t test_vsbc_vxm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, vbool64_t v0, size_t vl) { +vuint16mf4_t test_vsbc_vxm_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, vbool64_t v0, size_t vl) { return __riscv_vsbc_vxm_u16mf4_tu(vd, vs2, rs1, v0, vl); } -vuint16mf2_t test_vsbc_vvm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, vbool32_t v0, size_t vl) { +vuint16mf2_t test_vsbc_vvm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, vbool32_t v0, + size_t vl) { return __riscv_vsbc_vvm_u16mf2_tu(vd, vs2, vs1, v0, vl); } -vuint16mf2_t test_vsbc_vxm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, vbool32_t v0, size_t vl) { +vuint16mf2_t test_vsbc_vxm_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, vbool32_t v0, size_t vl) { return __riscv_vsbc_vxm_u16mf2_tu(vd, vs2, rs1, v0, vl); } -vuint16m1_t test_vsbc_vvm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, vbool16_t v0, size_t vl) { +vuint16m1_t test_vsbc_vvm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, vbool16_t v0, size_t vl) { return __riscv_vsbc_vvm_u16m1_tu(vd, vs2, vs1, v0, vl); } -vuint16m1_t test_vsbc_vxm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, vbool16_t v0, size_t vl) { +vuint16m1_t test_vsbc_vxm_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, vbool16_t v0, size_t vl) { return __riscv_vsbc_vxm_u16m1_tu(vd, vs2, rs1, v0, vl); } -vuint16m2_t test_vsbc_vvm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, vbool8_t v0, size_t vl) { +vuint16m2_t test_vsbc_vvm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, vbool8_t v0, size_t vl) { return __riscv_vsbc_vvm_u16m2_tu(vd, vs2, vs1, v0, vl); } -vuint16m2_t test_vsbc_vxm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, vbool8_t v0, size_t vl) { +vuint16m2_t test_vsbc_vxm_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, vbool8_t v0, size_t vl) { return __riscv_vsbc_vxm_u16m2_tu(vd, vs2, rs1, v0, vl); } 
-vuint16m4_t test_vsbc_vvm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, vbool4_t v0, size_t vl) { +vuint16m4_t test_vsbc_vvm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, vbool4_t v0, size_t vl) { return __riscv_vsbc_vvm_u16m4_tu(vd, vs2, vs1, v0, vl); } -vuint16m4_t test_vsbc_vxm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, vbool4_t v0, size_t vl) { +vuint16m4_t test_vsbc_vxm_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, vbool4_t v0, size_t vl) { return __riscv_vsbc_vxm_u16m4_tu(vd, vs2, rs1, v0, vl); } -vuint16m8_t test_vsbc_vvm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, vbool2_t v0, size_t vl) { +vuint16m8_t test_vsbc_vvm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, vbool2_t v0, size_t vl) { return __riscv_vsbc_vvm_u16m8_tu(vd, vs2, vs1, v0, vl); } -vuint16m8_t test_vsbc_vxm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, vbool2_t v0, size_t vl) { +vuint16m8_t test_vsbc_vxm_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, vbool2_t v0, size_t vl) { return __riscv_vsbc_vxm_u16m8_tu(vd, vs2, rs1, v0, vl); } -vuint32mf2_t test_vsbc_vvm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, vbool64_t v0, size_t vl) { +vuint32mf2_t test_vsbc_vvm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, vbool64_t v0, + size_t vl) { return __riscv_vsbc_vvm_u32mf2_tu(vd, vs2, vs1, v0, vl); } -vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, vbool64_t v0, size_t vl) { +vuint32mf2_t test_vsbc_vxm_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, vbool64_t v0, size_t vl) { return __riscv_vsbc_vxm_u32mf2_tu(vd, vs2, rs1, v0, vl); } -vuint32m1_t test_vsbc_vvm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, vbool32_t v0, size_t vl) { +vuint32m1_t test_vsbc_vvm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, vbool32_t v0, size_t vl) { return __riscv_vsbc_vvm_u32m1_tu(vd, vs2, vs1, v0, vl); } -vuint32m1_t test_vsbc_vxm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, vbool32_t v0, size_t vl) { +vuint32m1_t test_vsbc_vxm_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, vbool32_t v0, size_t vl) { return __riscv_vsbc_vxm_u32m1_tu(vd, vs2, rs1, v0, vl); } -vuint32m2_t test_vsbc_vvm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, vbool16_t v0, size_t vl) { +vuint32m2_t test_vsbc_vvm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, vbool16_t v0, size_t vl) { return __riscv_vsbc_vvm_u32m2_tu(vd, vs2, vs1, v0, vl); } -vuint32m2_t test_vsbc_vxm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, vbool16_t v0, size_t vl) { +vuint32m2_t test_vsbc_vxm_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, vbool16_t v0, size_t vl) { return __riscv_vsbc_vxm_u32m2_tu(vd, vs2, rs1, v0, vl); } -vuint32m4_t test_vsbc_vvm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, vbool8_t v0, size_t vl) { +vuint32m4_t test_vsbc_vvm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, vbool8_t v0, size_t vl) { return __riscv_vsbc_vvm_u32m4_tu(vd, vs2, vs1, v0, vl); } -vuint32m4_t test_vsbc_vxm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, vbool8_t v0, size_t vl) { +vuint32m4_t test_vsbc_vxm_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, vbool8_t v0, size_t vl) { return __riscv_vsbc_vxm_u32m4_tu(vd, vs2, rs1, v0, vl); } -vuint32m8_t test_vsbc_vvm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, vbool4_t v0, size_t vl) { +vuint32m8_t 
test_vsbc_vvm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, vbool4_t v0, size_t vl) { return __riscv_vsbc_vvm_u32m8_tu(vd, vs2, vs1, v0, vl); } -vuint32m8_t test_vsbc_vxm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, vbool4_t v0, size_t vl) { +vuint32m8_t test_vsbc_vxm_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, vbool4_t v0, size_t vl) { return __riscv_vsbc_vxm_u32m8_tu(vd, vs2, rs1, v0, vl); } -vuint64m1_t test_vsbc_vvm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, vbool64_t v0, size_t vl) { +vuint64m1_t test_vsbc_vvm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, vbool64_t v0, size_t vl) { return __riscv_vsbc_vvm_u64m1_tu(vd, vs2, vs1, v0, vl); } -vuint64m1_t test_vsbc_vxm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, vbool64_t v0, size_t vl) { +vuint64m1_t test_vsbc_vxm_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, vbool64_t v0, size_t vl) { return __riscv_vsbc_vxm_u64m1_tu(vd, vs2, rs1, v0, vl); } -vuint64m2_t test_vsbc_vvm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, vbool32_t v0, size_t vl) { +vuint64m2_t test_vsbc_vvm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, vbool32_t v0, size_t vl) { return __riscv_vsbc_vvm_u64m2_tu(vd, vs2, vs1, v0, vl); } -vuint64m2_t test_vsbc_vxm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, vbool32_t v0, size_t vl) { +vuint64m2_t test_vsbc_vxm_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, vbool32_t v0, size_t vl) { return __riscv_vsbc_vxm_u64m2_tu(vd, vs2, rs1, v0, vl); } -vuint64m4_t test_vsbc_vvm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, vbool16_t v0, size_t vl) { +vuint64m4_t test_vsbc_vvm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, vbool16_t v0, size_t vl) { return __riscv_vsbc_vvm_u64m4_tu(vd, vs2, vs1, v0, vl); } -vuint64m4_t test_vsbc_vxm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, vbool16_t v0, size_t vl) { +vuint64m4_t test_vsbc_vxm_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, vbool16_t v0, size_t vl) { return __riscv_vsbc_vxm_u64m4_tu(vd, vs2, rs1, v0, vl); } -vuint64m8_t test_vsbc_vvm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, vbool8_t v0, size_t vl) { +vuint64m8_t test_vsbc_vvm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, vbool8_t v0, size_t vl) { return __riscv_vsbc_vvm_u64m8_tu(vd, vs2, vs1, v0, vl); } -vuint64m8_t test_vsbc_vxm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, vbool8_t v0, size_t vl) { +vuint64m8_t test_vsbc_vxm_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, vbool8_t v0, size_t vl) { return __riscv_vsbc_vxm_u64m8_tu(vd, vs2, rs1, v0, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsetvl.c b/auto-generated/policy_funcs/llvm-api-tests/vsetvl.c index 0e7b7dda7..994e9aa38 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsetvl.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsetvl.c @@ -5,90 +5,46 @@ #include <riscv_vector.h> -size_t test_vsetvl_e8mf8(size_t avl) { - return __riscv_vsetvl_e8mf8(avl); -} +size_t test_vsetvl_e8mf8(size_t avl) { return __riscv_vsetvl_e8mf8(avl); } -size_t test_vsetvl_e8mf4(size_t avl) { - return __riscv_vsetvl_e8mf4(avl); -} +size_t test_vsetvl_e8mf4(size_t avl) { return __riscv_vsetvl_e8mf4(avl); } -size_t test_vsetvl_e8mf2(size_t avl) { - return __riscv_vsetvl_e8mf2(avl); -} +size_t test_vsetvl_e8mf2(size_t avl) { return __riscv_vsetvl_e8mf2(avl); } -size_t test_vsetvl_e8m1(size_t avl) { - return __riscv_vsetvl_e8m1(avl); -} +size_t
test_vsetvl_e8m1(size_t avl) { return __riscv_vsetvl_e8m1(avl); } -size_t test_vsetvl_e8m2(size_t avl) { - return __riscv_vsetvl_e8m2(avl); -} +size_t test_vsetvl_e8m2(size_t avl) { return __riscv_vsetvl_e8m2(avl); } -size_t test_vsetvl_e8m4(size_t avl) { - return __riscv_vsetvl_e8m4(avl); -} +size_t test_vsetvl_e8m4(size_t avl) { return __riscv_vsetvl_e8m4(avl); } -size_t test_vsetvl_e8m8(size_t avl) { - return __riscv_vsetvl_e8m8(avl); -} +size_t test_vsetvl_e8m8(size_t avl) { return __riscv_vsetvl_e8m8(avl); } -size_t test_vsetvl_e16mf4(size_t avl) { - return __riscv_vsetvl_e16mf4(avl); -} +size_t test_vsetvl_e16mf4(size_t avl) { return __riscv_vsetvl_e16mf4(avl); } -size_t test_vsetvl_e16mf2(size_t avl) { - return __riscv_vsetvl_e16mf2(avl); -} +size_t test_vsetvl_e16mf2(size_t avl) { return __riscv_vsetvl_e16mf2(avl); } -size_t test_vsetvl_e16m1(size_t avl) { - return __riscv_vsetvl_e16m1(avl); -} +size_t test_vsetvl_e16m1(size_t avl) { return __riscv_vsetvl_e16m1(avl); } -size_t test_vsetvl_e16m2(size_t avl) { - return __riscv_vsetvl_e16m2(avl); -} +size_t test_vsetvl_e16m2(size_t avl) { return __riscv_vsetvl_e16m2(avl); } -size_t test_vsetvl_e16m4(size_t avl) { - return __riscv_vsetvl_e16m4(avl); -} +size_t test_vsetvl_e16m4(size_t avl) { return __riscv_vsetvl_e16m4(avl); } -size_t test_vsetvl_e16m8(size_t avl) { - return __riscv_vsetvl_e16m8(avl); -} +size_t test_vsetvl_e16m8(size_t avl) { return __riscv_vsetvl_e16m8(avl); } -size_t test_vsetvl_e32mf2(size_t avl) { - return __riscv_vsetvl_e32mf2(avl); -} +size_t test_vsetvl_e32mf2(size_t avl) { return __riscv_vsetvl_e32mf2(avl); } -size_t test_vsetvl_e32m1(size_t avl) { - return __riscv_vsetvl_e32m1(avl); -} +size_t test_vsetvl_e32m1(size_t avl) { return __riscv_vsetvl_e32m1(avl); } -size_t test_vsetvl_e32m2(size_t avl) { - return __riscv_vsetvl_e32m2(avl); -} +size_t test_vsetvl_e32m2(size_t avl) { return __riscv_vsetvl_e32m2(avl); } -size_t test_vsetvl_e32m4(size_t avl) { - return __riscv_vsetvl_e32m4(avl); -} +size_t test_vsetvl_e32m4(size_t avl) { return __riscv_vsetvl_e32m4(avl); } -size_t test_vsetvl_e32m8(size_t avl) { - return __riscv_vsetvl_e32m8(avl); -} +size_t test_vsetvl_e32m8(size_t avl) { return __riscv_vsetvl_e32m8(avl); } -size_t test_vsetvl_e64m1(size_t avl) { - return __riscv_vsetvl_e64m1(avl); -} +size_t test_vsetvl_e64m1(size_t avl) { return __riscv_vsetvl_e64m1(avl); } -size_t test_vsetvl_e64m2(size_t avl) { - return __riscv_vsetvl_e64m2(avl); -} +size_t test_vsetvl_e64m2(size_t avl) { return __riscv_vsetvl_e64m2(avl); } -size_t test_vsetvl_e64m4(size_t avl) { - return __riscv_vsetvl_e64m4(avl); -} +size_t test_vsetvl_e64m4(size_t avl) { return __riscv_vsetvl_e64m4(avl); } -size_t test_vsetvl_e64m8(size_t avl) { - return __riscv_vsetvl_e64m8(avl); -} +size_t test_vsetvl_e64m8(size_t avl) { return __riscv_vsetvl_e64m8(avl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsetvlmax.c b/auto-generated/policy_funcs/llvm-api-tests/vsetvlmax.c index 77eb6af38..b5d48608b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsetvlmax.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsetvlmax.c @@ -5,90 +5,46 @@ #include <riscv_vector.h> -size_t test_vsetvlmax_e8mf8() { - return __riscv_vsetvlmax_e8mf8(); -} +size_t test_vsetvlmax_e8mf8() { return __riscv_vsetvlmax_e8mf8(); } -size_t test_vsetvlmax_e8mf4() { - return __riscv_vsetvlmax_e8mf4(); -} +size_t test_vsetvlmax_e8mf4() { return __riscv_vsetvlmax_e8mf4(); } -size_t test_vsetvlmax_e8mf2() { - return __riscv_vsetvlmax_e8mf2(); -} +size_t test_vsetvlmax_e8mf2() {
return __riscv_vsetvlmax_e8mf2(); } -size_t test_vsetvlmax_e8m1() { - return __riscv_vsetvlmax_e8m1(); -} +size_t test_vsetvlmax_e8m1() { return __riscv_vsetvlmax_e8m1(); } -size_t test_vsetvlmax_e8m2() { - return __riscv_vsetvlmax_e8m2(); -} +size_t test_vsetvlmax_e8m2() { return __riscv_vsetvlmax_e8m2(); } -size_t test_vsetvlmax_e8m4() { - return __riscv_vsetvlmax_e8m4(); -} +size_t test_vsetvlmax_e8m4() { return __riscv_vsetvlmax_e8m4(); } -size_t test_vsetvlmax_e8m8() { - return __riscv_vsetvlmax_e8m8(); -} +size_t test_vsetvlmax_e8m8() { return __riscv_vsetvlmax_e8m8(); } -size_t test_vsetvlmax_e16mf4() { - return __riscv_vsetvlmax_e16mf4(); -} +size_t test_vsetvlmax_e16mf4() { return __riscv_vsetvlmax_e16mf4(); } -size_t test_vsetvlmax_e16mf2() { - return __riscv_vsetvlmax_e16mf2(); -} +size_t test_vsetvlmax_e16mf2() { return __riscv_vsetvlmax_e16mf2(); } -size_t test_vsetvlmax_e16m1() { - return __riscv_vsetvlmax_e16m1(); -} +size_t test_vsetvlmax_e16m1() { return __riscv_vsetvlmax_e16m1(); } -size_t test_vsetvlmax_e16m2() { - return __riscv_vsetvlmax_e16m2(); -} +size_t test_vsetvlmax_e16m2() { return __riscv_vsetvlmax_e16m2(); } -size_t test_vsetvlmax_e16m4() { - return __riscv_vsetvlmax_e16m4(); -} +size_t test_vsetvlmax_e16m4() { return __riscv_vsetvlmax_e16m4(); } -size_t test_vsetvlmax_e16m8() { - return __riscv_vsetvlmax_e16m8(); -} +size_t test_vsetvlmax_e16m8() { return __riscv_vsetvlmax_e16m8(); } -size_t test_vsetvlmax_e32mf2() { - return __riscv_vsetvlmax_e32mf2(); -} +size_t test_vsetvlmax_e32mf2() { return __riscv_vsetvlmax_e32mf2(); } -size_t test_vsetvlmax_e32m1() { - return __riscv_vsetvlmax_e32m1(); -} +size_t test_vsetvlmax_e32m1() { return __riscv_vsetvlmax_e32m1(); } -size_t test_vsetvlmax_e32m2() { - return __riscv_vsetvlmax_e32m2(); -} +size_t test_vsetvlmax_e32m2() { return __riscv_vsetvlmax_e32m2(); } -size_t test_vsetvlmax_e32m4() { - return __riscv_vsetvlmax_e32m4(); -} +size_t test_vsetvlmax_e32m4() { return __riscv_vsetvlmax_e32m4(); } -size_t test_vsetvlmax_e32m8() { - return __riscv_vsetvlmax_e32m8(); -} +size_t test_vsetvlmax_e32m8() { return __riscv_vsetvlmax_e32m8(); } -size_t test_vsetvlmax_e64m1() { - return __riscv_vsetvlmax_e64m1(); -} +size_t test_vsetvlmax_e64m1() { return __riscv_vsetvlmax_e64m1(); } -size_t test_vsetvlmax_e64m2() { - return __riscv_vsetvlmax_e64m2(); -} +size_t test_vsetvlmax_e64m2() { return __riscv_vsetvlmax_e64m2(); } -size_t test_vsetvlmax_e64m4() { - return __riscv_vsetvlmax_e64m4(); -} +size_t test_vsetvlmax_e64m4() { return __riscv_vsetvlmax_e64m4(); } -size_t test_vsetvlmax_e64m8() { - return __riscv_vsetvlmax_e64m8(); -} +size_t test_vsetvlmax_e64m8() { return __riscv_vsetvlmax_e64m8(); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsext_vf2.c b/auto-generated/policy_funcs/llvm-api-tests/vsext_vf2.c index 9d5fa2acb..2877bd461 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsext_vf2.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsext_vf2.c @@ -5,11 +5,13 @@ #include <riscv_vector.h> -vint16mf4_t test_vsext_vf2_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vsext_vf2_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16mf4_tu(vd, vs2, vl); } -vint16mf2_t test_vsext_vf2_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vsext_vf2_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16mf2_tu(vd, vs2, vl); } @@ -29,7 +31,8 @@ vint16m8_t test_vsext_vf2_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, size_t vl) {
return __riscv_vsext_vf2_i16m8_tu(vd, vs2, vl); } -vint32mf2_t test_vsext_vf2_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vsext_vf2_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, + size_t vl) { return __riscv_vsext_vf2_i32mf2_tu(vd, vs2, vl); } @@ -65,182 +68,227 @@ vint64m8_t test_vsext_vf2_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, size_t vl) { return __riscv_vsext_vf2_i64m8_tu(vd, vs2, vl); } -vint16mf4_t test_vsext_vf2_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vsext_vf2_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vsext_vf2_i16mf4_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vsext_vf2_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vsext_vf2_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, size_t vl) { return __riscv_vsext_vf2_i16mf2_tum(vm, vd, vs2, vl); } -vint16m1_t test_vsext_vf2_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vsext_vf2_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16m1_tum(vm, vd, vs2, vl); } -vint16m2_t test_vsext_vf2_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vsext_vf2_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16m2_tum(vm, vd, vs2, vl); } -vint16m4_t test_vsext_vf2_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vsext_vf2_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16m4_tum(vm, vd, vs2, vl); } -vint16m8_t test_vsext_vf2_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vsext_vf2_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16m8_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vsext_vf2_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vsext_vf2_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vsext_vf2_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vsext_vf2_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vsext_vf2_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vsext_vf2_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vsext_vf2_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vsext_vf2_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + size_t vl) { return __riscv_vsext_vf2_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vsext_vf2_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vsext_vf2_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vsext_vf2_i32m4_tum(vm, vd, vs2, vl); } -vint32m8_t test_vsext_vf2_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vsext_vf2_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + size_t vl) { return __riscv_vsext_vf2_i32m8_tum(vm, vd, vs2, vl); } -vint64m1_t test_vsext_vf2_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vsext_vf2_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vsext_vf2_i64m1_tum(vm, vd, vs2, vl); } -vint64m2_t test_vsext_vf2_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vsext_vf2_i64m2_tum(vbool32_t vm, vint64m2_t 
vd, vint32m1_t vs2, + size_t vl) { return __riscv_vsext_vf2_i64m2_tum(vm, vd, vs2, vl); } -vint64m4_t test_vsext_vf2_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vsext_vf2_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + size_t vl) { return __riscv_vsext_vf2_i64m4_tum(vm, vd, vs2, vl); } -vint64m8_t test_vsext_vf2_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vsext_vf2_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + size_t vl) { return __riscv_vsext_vf2_i64m8_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vsext_vf2_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vsext_vf2_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vsext_vf2_i16mf4_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vsext_vf2_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vsext_vf2_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, size_t vl) { return __riscv_vsext_vf2_i16mf2_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vsext_vf2_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vsext_vf2_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs2, size_t vl) { return __riscv_vsext_vf2_i16m1_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vsext_vf2_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vsext_vf2_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16m2_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vsext_vf2_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vsext_vf2_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16m4_tumu(vm, vd, vs2, vl); } -vint16m8_t test_vsext_vf2_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vsext_vf2_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16m8_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vsext_vf2_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vsext_vf2_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vsext_vf2_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vsext_vf2_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vsext_vf2_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vsext_vf2_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vsext_vf2_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vsext_vf2_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vsext_vf2_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vsext_vf2_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vsext_vf2_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vsext_vf2_i32m4_tumu(vm, vd, vs2, vl); } -vint32m8_t test_vsext_vf2_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vsext_vf2_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + size_t vl) { return __riscv_vsext_vf2_i32m8_tumu(vm, vd, vs2, vl); } -vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vsext_vf2_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, size_t vl) { return 
__riscv_vsext_vf2_i64m1_tumu(vm, vd, vs2, vl); } -vint64m2_t test_vsext_vf2_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vsext_vf2_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vsext_vf2_i64m2_tumu(vm, vd, vs2, vl); } -vint64m4_t test_vsext_vf2_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vsext_vf2_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vsext_vf2_i64m4_tumu(vm, vd, vs2, vl); } -vint64m8_t test_vsext_vf2_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vsext_vf2_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + size_t vl) { return __riscv_vsext_vf2_i64m8_tumu(vm, vd, vs2, vl); } -vint16mf4_t test_vsext_vf2_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vsext_vf2_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vsext_vf2_i16mf4_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vsext_vf2_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vsext_vf2_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, size_t vl) { return __riscv_vsext_vf2_i16mf2_mu(vm, vd, vs2, vl); } -vint16m1_t test_vsext_vf2_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vsext_vf2_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16m1_mu(vm, vd, vs2, vl); } -vint16m2_t test_vsext_vf2_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vsext_vf2_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16m2_mu(vm, vd, vs2, vl); } -vint16m4_t test_vsext_vf2_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vsext_vf2_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16m4_mu(vm, vd, vs2, vl); } -vint16m8_t test_vsext_vf2_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vsext_vf2_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + size_t vl) { return __riscv_vsext_vf2_i16m8_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vsext_vf2_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vsext_vf2_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vsext_vf2_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vsext_vf2_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vsext_vf2_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + size_t vl) { return __riscv_vsext_vf2_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vsext_vf2_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vsext_vf2_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + size_t vl) { return __riscv_vsext_vf2_i32m2_mu(vm, vd, vs2, vl); } -vint32m4_t test_vsext_vf2_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vsext_vf2_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vsext_vf2_i32m4_mu(vm, vd, vs2, vl); } -vint32m8_t test_vsext_vf2_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vsext_vf2_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + size_t vl) { return __riscv_vsext_vf2_i32m8_mu(vm, vd, vs2, vl); } -vint64m1_t test_vsext_vf2_i64m1_mu(vbool64_t vm, vint64m1_t vd, 
vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vsext_vf2_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + size_t vl) { return __riscv_vsext_vf2_i64m1_mu(vm, vd, vs2, vl); } -vint64m2_t test_vsext_vf2_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vsext_vf2_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + size_t vl) { return __riscv_vsext_vf2_i64m2_mu(vm, vd, vs2, vl); } -vint64m4_t test_vsext_vf2_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vsext_vf2_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + size_t vl) { return __riscv_vsext_vf2_i64m4_mu(vm, vd, vs2, vl); } -vint64m8_t test_vsext_vf2_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vsext_vf2_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + size_t vl) { return __riscv_vsext_vf2_i64m8_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsext_vf4.c b/auto-generated/policy_funcs/llvm-api-tests/vsext_vf4.c index 129c5d1b7..0162db44a 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsext_vf4.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsext_vf4.c @@ -5,7 +5,8 @@ #include <riscv_vector.h> -vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t vd, vint8mf8_t vs2, size_t vl) { +vint32mf2_t test_vsext_vf4_i32mf2_tu(vint32mf2_t vd, vint8mf8_t vs2, + size_t vl) { return __riscv_vsext_vf4_i32mf2_tu(vd, vs2, vl); } @@ -41,110 +42,137 @@ vint64m8_t test_vsext_vf4_i64m8_tu(vint64m8_t vd, vint16m2_t vs2, size_t vl) { return __riscv_vsext_vf4_i64m8_tu(vd, vs2, vl); } -vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint8mf8_t vs2, size_t vl) { +vint32mf2_t test_vsext_vf4_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vsext_vf4_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint8mf4_t vs2, size_t vl) { +vint32m1_t test_vsext_vf4_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint8mf4_t vs2, + size_t vl) { return __riscv_vsext_vf4_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint8mf2_t vs2, size_t vl) { +vint32m2_t test_vsext_vf4_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint8mf2_t vs2, + size_t vl) { return __riscv_vsext_vf4_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint8m1_t vs2, size_t vl) { +vint32m4_t test_vsext_vf4_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint8m1_t vs2, + size_t vl) { return __riscv_vsext_vf4_i32m4_tum(vm, vd, vs2, vl); } -vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint8m2_t vs2, size_t vl) { +vint32m8_t test_vsext_vf4_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint8m2_t vs2, + size_t vl) { return __riscv_vsext_vf4_i32m8_tum(vm, vd, vs2, vl); } -vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint16mf4_t vs2, size_t vl) { +vint64m1_t test_vsext_vf4_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vsext_vf4_i64m1_tum(vm, vd, vs2, vl); } -vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint16mf2_t vs2, size_t vl) { +vint64m2_t test_vsext_vf4_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vsext_vf4_i64m2_tum(vm, vd, vs2, vl); } -vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint16m1_t vs2, size_t vl) { +vint64m4_t test_vsext_vf4_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint16m1_t vs2, + size_t vl) { return __riscv_vsext_vf4_i64m4_tum(vm, vd,
vs2, vl); } -vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint16m2_t vs2, size_t vl) { +vint64m8_t test_vsext_vf4_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vsext_vf4_i64m8_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint8mf8_t vs2, size_t vl) { +vint32mf2_t test_vsext_vf4_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vsext_vf4_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint8mf4_t vs2, size_t vl) { +vint32m1_t test_vsext_vf4_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint8mf4_t vs2, size_t vl) { return __riscv_vsext_vf4_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint8mf2_t vs2, size_t vl) { +vint32m2_t test_vsext_vf4_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint8mf2_t vs2, size_t vl) { return __riscv_vsext_vf4_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint8m1_t vs2, size_t vl) { +vint32m4_t test_vsext_vf4_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint8m1_t vs2, + size_t vl) { return __riscv_vsext_vf4_i32m4_tumu(vm, vd, vs2, vl); } -vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint8m2_t vs2, size_t vl) { +vint32m8_t test_vsext_vf4_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint8m2_t vs2, + size_t vl) { return __riscv_vsext_vf4_i32m8_tumu(vm, vd, vs2, vl); } -vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint16mf4_t vs2, size_t vl) { +vint64m1_t test_vsext_vf4_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vsext_vf4_i64m1_tumu(vm, vd, vs2, vl); } -vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint16mf2_t vs2, size_t vl) { +vint64m2_t test_vsext_vf4_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vsext_vf4_i64m2_tumu(vm, vd, vs2, vl); } -vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint16m1_t vs2, size_t vl) { +vint64m4_t test_vsext_vf4_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vsext_vf4_i64m4_tumu(vm, vd, vs2, vl); } -vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint16m2_t vs2, size_t vl) { +vint64m8_t test_vsext_vf4_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vsext_vf4_i64m8_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint8mf8_t vs2, size_t vl) { +vint32mf2_t test_vsext_vf4_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vsext_vf4_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint8mf4_t vs2, size_t vl) { +vint32m1_t test_vsext_vf4_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint8mf4_t vs2, + size_t vl) { return __riscv_vsext_vf4_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint8mf2_t vs2, size_t vl) { +vint32m2_t test_vsext_vf4_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint8mf2_t vs2, + size_t vl) { return __riscv_vsext_vf4_i32m2_mu(vm, vd, vs2, vl); } -vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint8m1_t vs2, size_t vl) { +vint32m4_t test_vsext_vf4_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint8m1_t vs2, + size_t vl) { return __riscv_vsext_vf4_i32m4_mu(vm, vd, vs2, vl); } -vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t vm, vint32m8_t vd, 
vint8m2_t vs2, size_t vl) { +vint32m8_t test_vsext_vf4_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint8m2_t vs2, + size_t vl) { return __riscv_vsext_vf4_i32m8_mu(vm, vd, vs2, vl); } -vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint16mf4_t vs2, size_t vl) { +vint64m1_t test_vsext_vf4_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint16mf4_t vs2, + size_t vl) { return __riscv_vsext_vf4_i64m1_mu(vm, vd, vs2, vl); } -vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint16mf2_t vs2, size_t vl) { +vint64m2_t test_vsext_vf4_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint16mf2_t vs2, + size_t vl) { return __riscv_vsext_vf4_i64m2_mu(vm, vd, vs2, vl); } -vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint16m1_t vs2, size_t vl) { +vint64m4_t test_vsext_vf4_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint16m1_t vs2, + size_t vl) { return __riscv_vsext_vf4_i64m4_mu(vm, vd, vs2, vl); } -vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint16m2_t vs2, size_t vl) { +vint64m8_t test_vsext_vf4_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vsext_vf4_i64m8_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsext_vf8.c b/auto-generated/policy_funcs/llvm-api-tests/vsext_vf8.c index b4e0f6159..b82c7a0fb 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsext_vf8.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsext_vf8.c @@ -21,50 +21,62 @@ vint64m8_t test_vsext_vf8_i64m8_tu(vint64m8_t vd, vint8m1_t vs2, size_t vl) { return __riscv_vsext_vf8_i64m8_tu(vd, vs2, vl); } -vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint8mf8_t vs2, size_t vl) { +vint64m1_t test_vsext_vf8_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint8mf8_t vs2, + size_t vl) { return __riscv_vsext_vf8_i64m1_tum(vm, vd, vs2, vl); } -vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint8mf4_t vs2, size_t vl) { +vint64m2_t test_vsext_vf8_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint8mf4_t vs2, + size_t vl) { return __riscv_vsext_vf8_i64m2_tum(vm, vd, vs2, vl); } -vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint8mf2_t vs2, size_t vl) { +vint64m4_t test_vsext_vf8_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint8mf2_t vs2, + size_t vl) { return __riscv_vsext_vf8_i64m4_tum(vm, vd, vs2, vl); } -vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint8m1_t vs2, size_t vl) { +vint64m8_t test_vsext_vf8_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint8m1_t vs2, + size_t vl) { return __riscv_vsext_vf8_i64m8_tum(vm, vd, vs2, vl); } -vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint8mf8_t vs2, size_t vl) { +vint64m1_t test_vsext_vf8_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vsext_vf8_i64m1_tumu(vm, vd, vs2, vl); } -vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint8mf4_t vs2, size_t vl) { +vint64m2_t test_vsext_vf8_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint8mf4_t vs2, size_t vl) { return __riscv_vsext_vf8_i64m2_tumu(vm, vd, vs2, vl); } -vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint8mf2_t vs2, size_t vl) { +vint64m4_t test_vsext_vf8_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint8mf2_t vs2, size_t vl) { return __riscv_vsext_vf8_i64m4_tumu(vm, vd, vs2, vl); } -vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint8m1_t vs2, size_t vl) { +vint64m8_t test_vsext_vf8_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint8m1_t vs2, + size_t vl) { return __riscv_vsext_vf8_i64m8_tumu(vm, vd, 
vs2, vl); } -vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint8mf8_t vs2, size_t vl) { +vint64m1_t test_vsext_vf8_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint8mf8_t vs2, + size_t vl) { return __riscv_vsext_vf8_i64m1_mu(vm, vd, vs2, vl); } -vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint8mf4_t vs2, size_t vl) { +vint64m2_t test_vsext_vf8_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint8mf4_t vs2, + size_t vl) { return __riscv_vsext_vf8_i64m2_mu(vm, vd, vs2, vl); } -vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint8mf2_t vs2, size_t vl) { +vint64m4_t test_vsext_vf8_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint8mf2_t vs2, + size_t vl) { return __riscv_vsext_vf8_i64m4_mu(vm, vd, vs2, vl); } -vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint8m1_t vs2, size_t vl) { +vint64m8_t test_vsext_vf8_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint8m1_t vs2, + size_t vl) { return __riscv_vsext_vf8_i64m8_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vslide1down.c b/auto-generated/policy_funcs/llvm-api-tests/vslide1down.c index 4cf7f288c..eb7accebf 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vslide1down.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vslide1down.c @@ -5,706 +5,995 @@ #include <riscv_vector.h> -vint8mf8_t test_vslide1down_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vslide1down_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1down_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vslide1down_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1down_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vslide1down_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vslide1down_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vslide1down_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vslide1down_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vslide1down_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vslide1down_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vslide1down_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vslide1down_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vslide1down_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1down_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vslide1down_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vslide1down_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1down_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vslide1down_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return
__riscv_vslide1down_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vslide1down_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vslide1down_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vslide1down_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vslide1down_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vslide1down_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vslide1down_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vslide1down_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vslide1down_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vslide1down_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vslide1down_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vslide1down_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vslide1down_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1down_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vslide1down_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vslide1down_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vslide1down_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vslide1down_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vslide1down_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vslide1down_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vslide1down_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vslide1down_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vslide1down_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vslide1down_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vslide1down_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vslide1down_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vslide1down_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vslide1down_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vslide1down_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vslide1down_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vslide1down_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vslide1down_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vslide1down_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vslide1down_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vslide1down_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vslide1down_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vslide1down_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vslide1down_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vslide1down_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vslide1down_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1down_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t 
test_vslide1down_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1down_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1down_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vslide1down_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1down_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1down_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vslide1down_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1down_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1down_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vslide1down_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1down_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1down_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vslide1down_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1down_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1down_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vslide1down_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1down_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1down_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vslide1down_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1down_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1down_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vslide1down_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vslide1down_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1down_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vslide1down_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vslide1down_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1down_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vslide1down_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vslide1down_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1down_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vslide1down_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vslide1down_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1down_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vslide1down_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vslide1down_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1down_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vslide1down_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vslide1down_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vslide1down_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return 
__riscv_vslide1down_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1down_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vslide1down_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vslide1down_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1down_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vslide1down_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vslide1down_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1down_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vslide1down_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vslide1down_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1down_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vslide1down_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vslide1down_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1down_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vslide1down_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vslide1down_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1down_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vslide1down_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vslide1down_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1down_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vslide1down_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vslide1down_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1down_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vslide1down_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vslide1down_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vslide1down_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vslide1down_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1down_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vslide1down_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1down_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vslide1down_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslide1down_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vslide1down_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslide1down_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vslide1down_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) 
{ return __riscv_vslide1down_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslide1down_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vslide1down_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1down_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vslide1down_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1down_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vslide1down_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1down_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vslide1down_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1down_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vslide1down_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslide1down_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vslide1down_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1down_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vslide1down_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslide1down_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vslide1down_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1down_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vslide1down_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1down_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vslide1down_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslide1down_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vslide1down_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1down_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vslide1down_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, int32_t rs1, + size_t vl) { return 
__riscv_vslide1down_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1down_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vslide1down_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslide1down_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vslide1down_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1down_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vslide1down_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslide1down_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vslide1down_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1down_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vslide1down_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1down_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vslide1down_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1down_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vslide1down_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1down_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vslide1down_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1down_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vslide1down_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1down_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vslide1down_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1down_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vslide1down_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1down_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vslide1down_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return 
__riscv_vslide1down_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1down_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vslide1down_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1down_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vslide1down_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1down_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vslide1down_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1down_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vslide1down_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1down_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vslide1down_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1down_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vslide1down_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1down_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vslide1down_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1down_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vslide1down_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1down_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vslide1down_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1down_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vslide1down_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1down_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vslide1down_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1down_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t 
test_vslide1down_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1down_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vslide1down_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1down_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vslide1down_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1down_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vslide1down_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslide1down_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vslide1down_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1down_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vslide1down_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1down_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vslide1down_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslide1down_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vslide1down_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + vint8m1_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslide1down_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vslide1down_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + vint8m2_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslide1down_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vslide1down_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + vint8m4_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1down_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vslide1down_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, + vint8m8_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1down_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vslide1down_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1down_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t 
test_vslide1down_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1down_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vslide1down_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslide1down_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vslide1down_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1down_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vslide1down_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslide1down_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vslide1down_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1down_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vslide1down_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1down_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vslide1down_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslide1down_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vslide1down_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1down_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vslide1down_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1down_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vslide1down_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslide1down_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vslide1down_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1down_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vslide1down_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslide1down_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, 
size_t vl) { +vint64m4_t test_vslide1down_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1down_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vslide1down_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1down_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vslide1down_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1down_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vslide1down_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1down_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vslide1down_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1down_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vslide1down_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1down_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vslide1down_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1down_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vslide1down_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1down_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vslide1down_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1down_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vslide1down_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1down_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vslide1down_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1down_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vslide1down_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t 
test_vslide1down_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vslide1down_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1down_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vslide1down_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1down_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vslide1down_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vslide1down_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1down_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vslide1down_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1down_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vslide1down_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1down_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vslide1down_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1down_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vslide1down_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1down_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vslide1down_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1down_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vslide1down_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1down_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vslide1down_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1down_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vslide1down_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + 
vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslide1down_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vslide1down_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1down_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vslide1down_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1down_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vslide1down_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslide1down_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vslide1down_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslide1down_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vslide1down_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslide1down_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vslide1down_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1down_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vslide1down_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1down_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1down_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vslide1down_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1down_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vslide1down_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1down_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vslide1down_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslide1down_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vslide1down_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1down_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vslide1down_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16m4_mu(vm, vd, vs2, rs1, 
vl); } -vint16m8_t test_vslide1down_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vslide1down_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1down_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vslide1down_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1down_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vslide1down_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslide1down_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vslide1down_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1down_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vslide1down_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1down_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vslide1down_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslide1down_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vslide1down_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1down_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vslide1down_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslide1down_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vslide1down_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1down_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vslide1down_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1down_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vslide1down_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1down_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vslide1down_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t 
test_vslide1down_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vslide1down_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1down_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vslide1down_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vslide1down_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1down_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vslide1down_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vslide1down_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1down_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vslide1down_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vslide1down_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1down_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vslide1down_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vslide1down_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1down_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vslide1down_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1down_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vslide1down_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1down_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vslide1down_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1down_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vslide1down_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1down_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vslide1down_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1down_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vslide1down_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1down_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vslide1down_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } 
-vuint32m1_t test_vslide1down_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vslide1down_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1down_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vslide1down_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1down_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vslide1down_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1down_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vslide1down_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1down_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vslide1down_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1down_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vslide1down_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1down_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vslide1down_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1down_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vslide1down_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1down_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vslide1up.c b/auto-generated/policy_funcs/llvm-api-tests/vslide1up.c index 3535fe3a1..3bd7e4de9 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vslide1up.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vslide1up.c @@ -5,706 +5,957 @@ #include <riscv_vector.h> -vint8mf8_t test_vslide1up_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vslide1up_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1up_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vslide1up_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1up_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vslide1up_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vslide1up_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) {
+vint8m1_t test_vslide1up_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vslide1up_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vslide1up_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vslide1up_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vslide1up_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vslide1up_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vslide1up_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vslide1up_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vslide1up_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vslide1up_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vslide1up_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vslide1up_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vslide1up_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vslide1up_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vslide1up_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vslide1up_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vslide1up_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vslide1up_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t 
test_vslide1up_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vslide1up_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vslide1up_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vslide1up_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vslide1up_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vslide1up_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vslide1up_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vslide1up_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vslide1up_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vslide1up_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vslide1up_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vslide1up_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vslide1up_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1up_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vslide1up_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vslide1up_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vslide1up_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1up_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vslide1up_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1up_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vslide1up_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vslide1up_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1up_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vslide1up_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vslide1up_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1up_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t 
vl) { +vuint16m1_t test_vslide1up_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vslide1up_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1up_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vslide1up_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vslide1up_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1up_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vslide1up_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vslide1up_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1up_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vslide1up_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vslide1up_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vslide1up_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vslide1up_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1up_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vslide1up_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vslide1up_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1up_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vslide1up_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vslide1up_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1up_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vslide1up_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vslide1up_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1up_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vslide1up_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vslide1up_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1up_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vslide1up_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vslide1up_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1up_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vslide1up_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vslide1up_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1up_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vslide1up_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vslide1up_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1up_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vslide1up_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vslide1up_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vslide1up_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vslide1up_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return 
__riscv_vslide1up_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1up_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vslide1up_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1up_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vslide1up_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslide1up_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vslide1up_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslide1up_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vslide1up_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslide1up_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vslide1up_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1up_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vslide1up_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vslide1up_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vslide1up_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vslide1up_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vslide1up_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vslide1up_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vslide1up_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, 
int32_t rs1, size_t vl) { +vint32mf2_t test_vslide1up_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vslide1up_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslide1up_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vslide1up_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vslide1up_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vslide1up_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslide1up_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vslide1up_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, int64_t rs1, size_t vl) { return __riscv_vslide1up_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vslide1up_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, int64_t rs1, size_t vl) { return __riscv_vslide1up_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vslide1up_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, int64_t rs1, size_t vl) { return __riscv_vslide1up_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vslide1up_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, int64_t rs1, size_t vl) { return __riscv_vslide1up_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vslide1up_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vslide1up_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vslide1up_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1up_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vslide1up_vx_u8m1_tum(vbool8_t vm, 
vuint8m1_t vd, + vuint8m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vslide1up_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vslide1up_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1up_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vslide1up_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1up_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vslide1up_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1up_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vslide1up_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1up_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vslide1up_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1up_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vslide1up_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1up_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vslide1up_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1up_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vslide1up_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1up_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vslide1up_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1up_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vslide1up_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1up_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vslide1up_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + 
vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1up_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vslide1up_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1up_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vslide1up_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1up_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vslide1up_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1up_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vslide1up_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1up_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vslide1up_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1up_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vslide1up_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslide1up_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vslide1up_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1up_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vslide1up_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1up_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vslide1up_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslide1up_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vslide1up_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslide1up_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vslide1up_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslide1up_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vslide1up_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return 
__riscv_vslide1up_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1up_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vslide1up_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vslide1up_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vslide1up_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vslide1up_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vslide1up_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vslide1up_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vslide1up_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vslide1up_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vslide1up_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslide1up_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vslide1up_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vslide1up_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vslide1up_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i32m8_tumu(vm, vd, vs2, rs1, 
vl); } -vint64m1_t test_vslide1up_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vslide1up_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vslide1up_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vslide1up_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vslide1up_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vslide1up_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vslide1up_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vslide1up_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1up_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vslide1up_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vslide1up_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vslide1up_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1up_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vslide1up_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1up_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vslide1up_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t 
test_vslide1up_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vslide1up_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1up_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vslide1up_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1up_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vslide1up_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1up_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vslide1up_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1up_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vslide1up_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vslide1up_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1up_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vslide1up_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1up_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vslide1up_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1up_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vslide1up_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1up_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vslide1up_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1up_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vslide1up_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1up_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vslide1up_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return 
__riscv_vslide1up_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1up_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vslide1up_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1up_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vslide1up_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslide1up_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vslide1up_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslide1up_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vslide1up_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslide1up_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vslide1up_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslide1up_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vslide1up_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslide1up_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vslide1up_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslide1up_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vslide1up_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslide1up_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vslide1up_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vslide1up_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslide1up_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vslide1up_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslide1up_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vslide1up_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslide1up_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vslide1up_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslide1up_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, 
size_t vl) { +vint16m2_t test_vslide1up_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslide1up_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vslide1up_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslide1up_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vslide1up_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, int16_t rs1, size_t vl) { return __riscv_vslide1up_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslide1up_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vslide1up_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslide1up_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vslide1up_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslide1up_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vslide1up_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslide1up_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vslide1up_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslide1up_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vslide1up_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, int32_t rs1, size_t vl) { return __riscv_vslide1up_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslide1up_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vslide1up_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, int64_t rs1, size_t vl) { return __riscv_vslide1up_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslide1up_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vslide1up_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, int64_t rs1, size_t vl) { return __riscv_vslide1up_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslide1up_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vslide1up_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, int64_t rs1, size_t vl) { return __riscv_vslide1up_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslide1up_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vslide1up_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, int64_t rs1, size_t vl) { return __riscv_vslide1up_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslide1up_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vslide1up_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return 
__riscv_vslide1up_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslide1up_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vslide1up_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslide1up_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vslide1up_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslide1up_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vslide1up_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslide1up_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vslide1up_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslide1up_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vslide1up_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslide1up_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vslide1up_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vslide1up_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslide1up_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vslide1up_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslide1up_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vslide1up_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslide1up_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vslide1up_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslide1up_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vslide1up_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslide1up_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vslide1up_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslide1up_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vslide1up_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t 
test_vslide1up_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vslide1up_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslide1up_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vslide1up_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslide1up_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vslide1up_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslide1up_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vslide1up_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslide1up_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vslide1up_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslide1up_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vslide1up_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslide1up_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vslide1up_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslide1up_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vslide1up_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslide1up_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vslide1up_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vslide1up_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vslidedown.c b/auto-generated/policy_funcs/llvm-api-tests/vslidedown.c index 34f836cb6..370264ff1 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vslidedown.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vslidedown.c @@ -1,951 +1,1310 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { +vfloat16mf4_t test_vslidedown_vx_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f16mf4_tu(vd, vs2, rs1,
vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { +vfloat16mf2_t test_vslidedown_vx_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { +vfloat16m1_t test_vslidedown_vx_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { +vfloat16m2_t test_vslidedown_vx_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { +vfloat16m4_t test_vslidedown_vx_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { +vfloat16m8_t test_vslidedown_vx_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { +vfloat32mf2_t test_vslidedown_vx_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { +vfloat32m1_t test_vslidedown_vx_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { +vfloat32m2_t test_vslidedown_vx_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { +vfloat32m4_t test_vslidedown_vx_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { +vfloat32m8_t test_vslidedown_vx_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { +vfloat64m1_t test_vslidedown_vx_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { +vfloat64m2_t test_vslidedown_vx_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { +vfloat64m4_t test_vslidedown_vx_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t 
rs1, size_t vl) { +vfloat64m8_t test_vslidedown_vx_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_f64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vslidedown_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vslidedown_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vslidedown_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vslidedown_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vslidedown_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vslidedown_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vslidedown_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vslidedown_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vslidedown_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vslidedown_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vslidedown_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vslidedown_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vslidedown_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { 
+vint32mf2_t test_vslidedown_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vslidedown_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vslidedown_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vslidedown_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vslidedown_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vslidedown_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vslidedown_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vslidedown_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vslidedown_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vslidedown_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vslidedown_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vslidedown_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vslidedown_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vslidedown_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t 
vl) { +vuint8m4_t test_vslidedown_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vslidedown_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vslidedown_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vslidedown_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vslidedown_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vslidedown_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vslidedown_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vslidedown_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vslidedown_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vslidedown_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vslidedown_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vslidedown_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vslidedown_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vslidedown_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u64m1_tu(vd, vs2, rs1, vl); } 
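The hunks above and below only re-wrap the generated signatures to clang-format's 80-column style; call semantics are unchanged. As a minimal usage sketch (not part of the generated diff; shift_window is a hypothetical helper name), the _tu variants exercised here slide vs2 down by a scalar element offset while the tail-undisturbed policy preserves the destination operand's tail:

#include <riscv_vector.h>

// Hypothetical helper: drop the first "offset" elements of vs2.
// Active elements get result[i] = vs2[i + offset]; under the _tu
// (tail-undisturbed) policy, elements at indices >= vl keep the
// corresponding values of vd rather than being left agnostic.
vuint64m1_t shift_window(vuint64m1_t vd, vuint64m1_t vs2, size_t offset,
                         size_t vl) {
  return __riscv_vslidedown_vx_u64m1_tu(vd, vs2, offset, vl);
}
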
-vuint64m2_t test_vslidedown_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vslidedown_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vslidedown_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslidedown_vx_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { +vfloat16mf4_t test_vslidedown_vx_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { +vfloat16mf2_t test_vslidedown_vx_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { +vfloat16m1_t test_vslidedown_vx_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { +vfloat16m2_t test_vslidedown_vx_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { +vfloat16m4_t test_vslidedown_vx_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { +vfloat16m8_t test_vslidedown_vx_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { +vfloat32mf2_t test_vslidedown_vx_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { +vfloat32m1_t test_vslidedown_vx_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { +vfloat32m2_t test_vslidedown_vx_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t 
test_vslidedown_vx_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { +vfloat32m4_t test_vslidedown_vx_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { +vfloat32m8_t test_vslidedown_vx_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { +vfloat64m1_t test_vslidedown_vx_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { +vfloat64m2_t test_vslidedown_vx_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { +vfloat64m4_t test_vslidedown_vx_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { +vfloat64m8_t test_vslidedown_vx_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vslidedown_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vslidedown_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vslidedown_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vslidedown_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vslidedown_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vslidedown_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, 
vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vslidedown_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vslidedown_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vslidedown_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vslidedown_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vslidedown_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vslidedown_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vslidedown_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vslidedown_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vslidedown_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vslidedown_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vslidedown_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vslidedown_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t 
test_vslidedown_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vslidedown_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vslidedown_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vslidedown_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vslidedown_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vslidedown_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vslidedown_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vslidedown_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vslidedown_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vslidedown_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vslidedown_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vslidedown_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vslidedown_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + 
vuint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vslidedown_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vslidedown_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vslidedown_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vslidedown_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vslidedown_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vslidedown_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vslidedown_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vslidedown_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vslidedown_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vslidedown_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vslidedown_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vslidedown_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + 
vuint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vslidedown_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslidedown_vx_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { +vfloat16mf4_t test_vslidedown_vx_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { +vfloat16mf2_t test_vslidedown_vx_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { +vfloat16m1_t test_vslidedown_vx_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { +vfloat16m2_t test_vslidedown_vx_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { +vfloat16m4_t test_vslidedown_vx_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { +vfloat16m8_t test_vslidedown_vx_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { +vfloat32mf2_t test_vslidedown_vx_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { +vfloat32m1_t test_vslidedown_vx_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { +vfloat32m2_t test_vslidedown_vx_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { +vfloat32m4_t test_vslidedown_vx_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, 
vfloat32m8_t vs2, size_t rs1, size_t vl) { +vfloat32m8_t test_vslidedown_vx_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslidedown_vx_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { +vfloat64m1_t test_vslidedown_vx_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { +vfloat64m2_t test_vslidedown_vx_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { +vfloat64m4_t test_vslidedown_vx_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { +vfloat64m8_t test_vslidedown_vx_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vslidedown_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vslidedown_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vslidedown_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vslidedown_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vslidedown_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vslidedown_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vslidedown_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, 
size_t vl) { +vint16mf4_t test_vslidedown_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vslidedown_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vslidedown_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vslidedown_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vslidedown_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vslidedown_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vslidedown_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vslidedown_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vslidedown_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vslidedown_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vslidedown_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vslidedown_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t 
test_vslidedown_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vslidedown_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vslidedown_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vslidedown_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vslidedown_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vslidedown_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vslidedown_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vslidedown_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vslidedown_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vslidedown_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vslidedown_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vslidedown_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t 
test_vslidedown_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vslidedown_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslidedown_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vslidedown_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vslidedown_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vslidedown_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vslidedown_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vslidedown_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vslidedown_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vslidedown_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vslidedown_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vslidedown_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vslidedown_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t 
rs1, size_t vl) { +vuint64m8_t test_vslidedown_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslidedown_vx_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { +vfloat16mf4_t test_vslidedown_vx_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslidedown_vx_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { +vfloat16mf2_t test_vslidedown_vx_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslidedown_vx_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { +vfloat16m1_t test_vslidedown_vx_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslidedown_vx_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { +vfloat16m2_t test_vslidedown_vx_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslidedown_vx_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { +vfloat16m4_t test_vslidedown_vx_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslidedown_vx_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { +vfloat16m8_t test_vslidedown_vx_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslidedown_vx_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { +vfloat32mf2_t test_vslidedown_vx_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslidedown_vx_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { +vfloat32m1_t test_vslidedown_vx_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslidedown_vx_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { +vfloat32m2_t test_vslidedown_vx_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslidedown_vx_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { +vfloat32m4_t test_vslidedown_vx_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslidedown_vx_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { +vfloat32m8_t test_vslidedown_vx_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t 
test_vslidedown_vx_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { +vfloat64m1_t test_vslidedown_vx_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslidedown_vx_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { +vfloat64m2_t test_vslidedown_vx_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslidedown_vx_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { +vfloat64m4_t test_vslidedown_vx_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslidedown_vx_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { +vfloat64m8_t test_vslidedown_vx_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_f64m8_mu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslidedown_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vslidedown_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslidedown_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vslidedown_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslidedown_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vslidedown_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslidedown_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vslidedown_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslidedown_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vslidedown_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslidedown_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vslidedown_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslidedown_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vslidedown_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslidedown_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vslidedown_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslidedown_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t 
test_vslidedown_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslidedown_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vslidedown_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslidedown_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vslidedown_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslidedown_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vslidedown_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslidedown_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vslidedown_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslidedown_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vslidedown_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslidedown_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vslidedown_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslidedown_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vslidedown_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslidedown_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vslidedown_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslidedown_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vslidedown_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslidedown_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vslidedown_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslidedown_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vslidedown_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslidedown_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vslidedown_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, size_t rs1, size_t vl) { return 
__riscv_vslidedown_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslidedown_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vslidedown_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslidedown_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vslidedown_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslidedown_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vslidedown_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslidedown_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vslidedown_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslidedown_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vslidedown_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslidedown_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vslidedown_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslidedown_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vslidedown_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslidedown_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vslidedown_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslidedown_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslidedown_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vslidedown_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslidedown_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vslidedown_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslidedown_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vslidedown_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslidedown_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vslidedown_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t 
test_vslidedown_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vslidedown_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslidedown_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vslidedown_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslidedown_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vslidedown_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslidedown_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vslidedown_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslidedown_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vslidedown_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslidedown_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vslidedown_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslidedown_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vslidedown_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslidedown_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vslidedown_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslidedown_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vslidedown_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslidedown_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vslidedown_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslidedown_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vslidedown_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslidedown_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vslideup.c b/auto-generated/policy_funcs/llvm-api-tests/vslideup.c index 0cb8ee641..7dae87ea7 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vslideup.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vslideup.c @@ -1,951 +1,1262 @@ // REQUIRES: riscv-registered-target // RUN: 
%clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s #include <riscv_vector.h> -vfloat16mf4_t test_vslideup_vx_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { +vfloat16mf4_t test_vslideup_vx_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f16mf4_tu(vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslideup_vx_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { +vfloat16mf2_t test_vslideup_vx_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f16mf2_tu(vd, vs2, rs1, vl); } -vfloat16m1_t test_vslideup_vx_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { +vfloat16m1_t test_vslideup_vx_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f16m1_tu(vd, vs2, rs1, vl); } -vfloat16m2_t test_vslideup_vx_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { +vfloat16m2_t test_vslideup_vx_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f16m2_tu(vd, vs2, rs1, vl); } -vfloat16m4_t test_vslideup_vx_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { +vfloat16m4_t test_vslideup_vx_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f16m4_tu(vd, vs2, rs1, vl); } -vfloat16m8_t test_vslideup_vx_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { +vfloat16m8_t test_vslideup_vx_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f16m8_tu(vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { +vfloat32mf2_t test_vslideup_vx_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f32mf2_tu(vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { +vfloat32m1_t test_vslideup_vx_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f32m1_tu(vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { +vfloat32m2_t test_vslideup_vx_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f32m2_tu(vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { +vfloat32m4_t test_vslideup_vx_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f32m4_tu(vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { +vfloat32m8_t test_vslideup_vx_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f32m8_tu(vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { +vfloat64m1_t test_vslideup_vx_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f64m1_tu(vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { +vfloat64m2_t
test_vslideup_vx_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f64m2_tu(vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { +vfloat64m4_t test_vslideup_vx_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f64m4_tu(vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { +vfloat64m8_t test_vslideup_vx_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_f64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vslideup_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vslideup_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vslideup_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vslideup_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vslideup_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vslideup_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vslideup_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vslideup_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vslideup_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vslideup_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vslideup_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vslideup_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, + size_t vl) { 
return __riscv_vslideup_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vslideup_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vslideup_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vslideup_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vslideup_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vslideup_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vslideup_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vslideup_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vslideup_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vslideup_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vslideup_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vslideup_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vslideup_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vslideup_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vslideup_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t 
test_vslideup_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vslideup_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vslideup_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vslideup_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vslideup_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vslideup_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vslideup_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vslideup_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vslideup_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vslideup_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vslideup_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vslideup_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vslideup_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vslideup_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vslideup_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vslideup_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vslideup_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vslideup_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t 
test_vslideup_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vslideup_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vslideup_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vslideup_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vslideup_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vslideup_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vslideup_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vslideup_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u64m8_tu(vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslideup_vx_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { +vfloat16mf4_t test_vslideup_vx_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16mf4_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslideup_vx_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { +vfloat16mf2_t test_vslideup_vx_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslideup_vx_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { +vfloat16m1_t test_vslideup_vx_f16m1_tum(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16m1_tum(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslideup_vx_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { +vfloat16m2_t test_vslideup_vx_f16m2_tum(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16m2_tum(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslideup_vx_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { +vfloat16m4_t test_vslideup_vx_f16m4_tum(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16m4_tum(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslideup_vx_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { +vfloat16m8_t test_vslideup_vx_f16m8_tum(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16m8_tum(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslideup_vx_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { +vfloat32mf2_t test_vslideup_vx_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32mf2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { +vfloat32m1_t test_vslideup_vx_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32m1_tum(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { +vfloat32m2_t test_vslideup_vx_f32m2_tum(vbool16_t 
vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32m2_tum(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { +vfloat32m4_t test_vslideup_vx_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32m4_tum(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { +vfloat32m8_t test_vslideup_vx_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32m8_tum(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { +vfloat64m1_t test_vslideup_vx_f64m1_tum(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f64m1_tum(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { +vfloat64m2_t test_vslideup_vx_f64m2_tum(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f64m2_tum(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { +vfloat64m4_t test_vslideup_vx_f64m4_tum(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f64m4_tum(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { +vfloat64m8_t test_vslideup_vx_f64m8_tum(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vslideup_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vslideup_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vslideup_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vslideup_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vslideup_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vslideup_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8m4_tum(vm, vd, vs2, rs1, 
vl); } -vint8m8_t test_vslideup_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vslideup_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vslideup_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vslideup_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vslideup_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vslideup_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vslideup_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vslideup_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vslideup_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vslideup_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vslideup_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vslideup_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vslideup_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t 
test_vslideup_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vslideup_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vslideup_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vslideup_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vslideup_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vslideup_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vslideup_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vslideup_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vslideup_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vslideup_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vslideup_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vslideup_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vslideup_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u16mf2_tum(vm, 
vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vslideup_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vslideup_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vslideup_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vslideup_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vslideup_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vslideup_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslideup_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vslideup_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslideup_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vslideup_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslideup_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vslideup_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslideup_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vslideup_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslideup_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vslideup_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslideup_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vslideup_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslideup_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, 
vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vslideup_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslideup_vx_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { +vfloat16mf4_t test_vslideup_vx_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16mf4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t test_vslideup_vx_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { +vfloat16mf2_t test_vslideup_vx_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslideup_vx_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { +vfloat16m1_t test_vslideup_vx_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslideup_vx_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { +vfloat16m2_t test_vslideup_vx_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslideup_vx_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { +vfloat16m4_t test_vslideup_vx_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslideup_vx_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { +vfloat16m8_t test_vslideup_vx_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslideup_vx_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { +vfloat32mf2_t test_vslideup_vx_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32mf2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { +vfloat32m1_t test_vslideup_vx_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { +vfloat32m2_t test_vslideup_vx_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { +vfloat32m4_t test_vslideup_vx_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { +vfloat32m8_t test_vslideup_vx_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t 
test_vslideup_vx_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { +vfloat64m1_t test_vslideup_vx_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f64m1_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { +vfloat64m2_t test_vslideup_vx_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f64m2_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t test_vslideup_vx_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { +vfloat64m4_t test_vslideup_vx_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f64m4_tumu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { +vfloat64m8_t test_vslideup_vx_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vslideup_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vslideup_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vslideup_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vslideup_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vslideup_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vslideup_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vslideup_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vslideup_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t 
test_vslideup_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vslideup_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vslideup_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vslideup_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vslideup_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + vint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vslideup_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vslideup_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vslideup_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vslideup_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vslideup_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vslideup_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vslideup_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vslideup_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, size_t rs1, size_t vl) { return 
__riscv_vslideup_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vslideup_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vslideup_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vslideup_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vslideup_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vslideup_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vslideup_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vslideup_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vslideup_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vslideup_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vslideup_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vslideup_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vslideup_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t 
test_vslideup_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vslideup_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vslideup_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vslideup_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vslideup_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslideup_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vslideup_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslideup_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vslideup_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslideup_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vslideup_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslideup_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vslideup_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslideup_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vslideup_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslideup_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vslideup_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslideup_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vslideup_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vfloat16mf4_t test_vslideup_vx_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, vfloat16mf4_t vs2, size_t rs1, size_t vl) { +vfloat16mf4_t test_vslideup_vx_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd, + vfloat16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16mf4_mu(vm, vd, vs2, rs1, vl); } -vfloat16mf2_t 
test_vslideup_vx_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, vfloat16mf2_t vs2, size_t rs1, size_t vl) { +vfloat16mf2_t test_vslideup_vx_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd, + vfloat16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m1_t test_vslideup_vx_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, vfloat16m1_t vs2, size_t rs1, size_t vl) { +vfloat16m1_t test_vslideup_vx_f16m1_mu(vbool16_t vm, vfloat16m1_t vd, + vfloat16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16m1_mu(vm, vd, vs2, rs1, vl); } -vfloat16m2_t test_vslideup_vx_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, vfloat16m2_t vs2, size_t rs1, size_t vl) { +vfloat16m2_t test_vslideup_vx_f16m2_mu(vbool8_t vm, vfloat16m2_t vd, + vfloat16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16m2_mu(vm, vd, vs2, rs1, vl); } -vfloat16m4_t test_vslideup_vx_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, vfloat16m4_t vs2, size_t rs1, size_t vl) { +vfloat16m4_t test_vslideup_vx_f16m4_mu(vbool4_t vm, vfloat16m4_t vd, + vfloat16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16m4_mu(vm, vd, vs2, rs1, vl); } -vfloat16m8_t test_vslideup_vx_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, vfloat16m8_t vs2, size_t rs1, size_t vl) { +vfloat16m8_t test_vslideup_vx_f16m8_mu(vbool2_t vm, vfloat16m8_t vd, + vfloat16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f16m8_mu(vm, vd, vs2, rs1, vl); } -vfloat32mf2_t test_vslideup_vx_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, vfloat32mf2_t vs2, size_t rs1, size_t vl) { +vfloat32mf2_t test_vslideup_vx_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32mf2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m1_t test_vslideup_vx_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, vfloat32m1_t vs2, size_t rs1, size_t vl) { +vfloat32m1_t test_vslideup_vx_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32m1_mu(vm, vd, vs2, rs1, vl); } -vfloat32m2_t test_vslideup_vx_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, vfloat32m2_t vs2, size_t rs1, size_t vl) { +vfloat32m2_t test_vslideup_vx_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32m2_mu(vm, vd, vs2, rs1, vl); } -vfloat32m4_t test_vslideup_vx_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, vfloat32m4_t vs2, size_t rs1, size_t vl) { +vfloat32m4_t test_vslideup_vx_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32m4_mu(vm, vd, vs2, rs1, vl); } -vfloat32m8_t test_vslideup_vx_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, vfloat32m8_t vs2, size_t rs1, size_t vl) { +vfloat32m8_t test_vslideup_vx_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f32m8_mu(vm, vd, vs2, rs1, vl); } -vfloat64m1_t test_vslideup_vx_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, vfloat64m1_t vs2, size_t rs1, size_t vl) { +vfloat64m1_t test_vslideup_vx_f64m1_mu(vbool64_t vm, vfloat64m1_t vd, + vfloat64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f64m1_mu(vm, vd, vs2, rs1, vl); } -vfloat64m2_t test_vslideup_vx_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, vfloat64m2_t vs2, size_t rs1, size_t vl) { +vfloat64m2_t test_vslideup_vx_f64m2_mu(vbool32_t vm, vfloat64m2_t vd, + vfloat64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f64m2_mu(vm, vd, vs2, rs1, vl); } -vfloat64m4_t 
test_vslideup_vx_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, vfloat64m4_t vs2, size_t rs1, size_t vl) { +vfloat64m4_t test_vslideup_vx_f64m4_mu(vbool16_t vm, vfloat64m4_t vd, + vfloat64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f64m4_mu(vm, vd, vs2, rs1, vl); } -vfloat64m8_t test_vslideup_vx_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, vfloat64m8_t vs2, size_t rs1, size_t vl) { +vfloat64m8_t test_vslideup_vx_f64m8_mu(vbool8_t vm, vfloat64m8_t vd, + vfloat64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_f64m8_mu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vslideup_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vslideup_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vslideup_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vslideup_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vslideup_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vslideup_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vslideup_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vslideup_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vslideup_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vslideup_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vslideup_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vslideup_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vslideup_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vslideup_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vslideup_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vslideup_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vslideup_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vslideup_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vslideup_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vslideup_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vslideup_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vslideup_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { 
return __riscv_vslideup_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vslideup_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vslideup_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vslideup_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vslideup_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vslideup_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vslideup_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vslideup_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vslideup_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vslideup_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vslideup_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vslideup_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vslideup_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vslideup_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vslideup_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vslideup_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vslideup_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vslideup_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vslideup_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vslideup_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vslideup_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vslideup_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vslideup_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vslideup_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vslideup_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vslideup_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t 
test_vslideup_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vslideup_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vslideup_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vslideup_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vslideup_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vslideup_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vslideup_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vslideup_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vslideup_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vslideup_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vslideup_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vslideup_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vslideup_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vslideup_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vslideup_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vslideup_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vslideup_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vslideup_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vslideup_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vslideup_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vslideup_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vslideup_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vslideup_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vslideup_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vslideup_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vslideup_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vslideup_vx_u32mf2_mu(vm, vd, vs2, 
rs1, vl); } -vuint32m1_t test_vslideup_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vslideup_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vslideup_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vslideup_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vslideup_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vslideup_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vslideup_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vslideup_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vslideup_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vslideup_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vslideup_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vslideup_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vslideup_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vslideup_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vslideup_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vslideup_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vslideup_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsll.c b/auto-generated/policy_funcs/llvm-api-tests/vsll.c index b63c36f55..9e1933279 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsll.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsll.c @@ -5,1410 +5,1804 @@ #include <riscv_vector.h> -vint8mf8_t test_vsll_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsll_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vsll_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vsll_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vsll_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vsll_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsll_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vsll_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vsll_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vsll_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t
test_vsll_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsll_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vsll_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vsll_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vsll_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vsll_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vsll_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vsll_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vsll_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vsll_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vsll_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vsll_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vsll_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vsll_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vsll_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vsll_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vsll_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vsll_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vsll_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vsll_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vsll_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vsll_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vsll_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsll_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vsll_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vsll_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vsll_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsll_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vsll_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vsll_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vsll_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vsll_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vsll_vv_i16m1_tu(vd, vs2, vs1, vl); } 
-vint16m1_t test_vsll_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vsll_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vsll_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vsll_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vsll_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vsll_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vsll_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vsll_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vsll_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vsll_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vsll_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vsll_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vsll_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsll_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsll_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vsll_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vsll_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vsll_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vsll_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vsll_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vsll_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vsll_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vsll_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vsll_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vsll_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vsll_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, + 
size_t vl) { return __riscv_vsll_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vsll_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vsll_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vsll_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vsll_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vsll_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vsll_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vsll_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vsll_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vsll_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vsll_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vsll_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vsll_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vsll_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vsll_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vsll_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vsll_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsll_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vsll_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vsll_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vsll_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsll_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsll_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vsll_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { 
+vuint8mf2_t test_vsll_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsll_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vsll_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsll_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vsll_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vsll_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vsll_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsll_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vsll_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vsll_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsll_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vsll_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vsll_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsll_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vsll_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vsll_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vsll_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vsll_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsll_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsll_vv_u16m1_tu(vd, vs2, vs1, vl); 
} -vuint16m1_t test_vsll_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vsll_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsll_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsll_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vsll_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsll_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsll_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vsll_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsll_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsll_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vsll_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsll_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsll_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsll_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsll_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vsll_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsll_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsll_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vsll_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsll_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsll_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsll_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t 
vl) { +vuint32m4_t test_vsll_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vsll_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsll_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsll_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsll_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vsll_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vsll_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsll_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsll_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsll_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vsll_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vsll_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsll_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsll_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsll_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vsll_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vsll_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsll_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsll_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsll_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vsll_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vsll_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsll_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsll_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsll_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vsll_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsll_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vsll_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsll_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vsll_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsll_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vsll_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsll_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsll_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsll_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsll_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { 
+vint8mf4_t test_vsll_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsll_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsll_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsll_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsll_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vsll_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsll_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vsll_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsll_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsll_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vsll_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsll_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vsll_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsll_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsll_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vsll_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsll_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vsll_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsll_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsll_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vsll_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsll_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vsll_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsll_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vsll_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsll_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsll_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsll_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vsll_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsll_vv_i16mf2_tum(vbool32_t 
vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vsll_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsll_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vsll_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsll_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsll_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vsll_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vsll_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsll_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsll_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vsll_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vsll_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsll_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vsll_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vsll_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsll_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsll_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vsll_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsll_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vsll_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vsll_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsll_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { 
+vint32m1_t test_vsll_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsll_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vsll_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsll_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vsll_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vsll_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsll_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vsll_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vsll_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsll_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vsll_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsll_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vsll_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsll_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vsll_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vsll_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsll_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vsll_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vsll_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsll_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vsll_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, 
size_t vl) { +vint64m8_t test_vsll_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsll_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vsll_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsll_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vsll_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vsll_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsll_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsll_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vsll_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vsll_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsll_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vsll_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsll_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsll_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsll_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vsll_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsll_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsll_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vsll_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsll_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsll_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, 
vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vsll_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsll_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsll_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vsll_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsll_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vsll_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsll_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsll_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { return 
__riscv_vsll_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsll_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vsll_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vsll_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsll_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsll_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { 
+vuint64m1_t test_vsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vsll_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vsll_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vsll_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsll_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsll_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsll_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vsll_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsll_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vsll_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsll_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsll_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsll_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsll_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vsll_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsll_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsll_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsll_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t 
test_vsll_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vsll_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsll_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vsll_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsll_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsll_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vsll_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsll_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vsll_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsll_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsll_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vsll_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsll_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vsll_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsll_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsll_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vsll_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsll_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vsll_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsll_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vsll_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsll_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsll_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsll_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vsll_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsll_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vsll_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } 
-vint16m1_t test_vsll_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vsll_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsll_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsll_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vsll_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vsll_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsll_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsll_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vsll_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vsll_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsll_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vsll_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vsll_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsll_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsll_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vsll_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsll_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vsll_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vsll_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsll_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vsll_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsll_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vsll_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vuint32m2_t 
vs1, size_t vl) { return __riscv_vsll_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vsll_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vsll_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsll_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vsll_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vsll_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsll_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vsll_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsll_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vsll_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsll_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vsll_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vsll_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsll_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vsll_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vsll_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsll_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vsll_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vsll_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsll_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vsll_vx_i64m8_tumu(vbool8_t 
vm, vint64m8_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsll_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vsll_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vsll_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsll_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsll_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vsll_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vsll_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsll_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vsll_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsll_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsll_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsll_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vsll_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsll_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsll_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vsll_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsll_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsll_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vsll_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t 
vs1, size_t vl) { +vuint8m8_t test_vsll_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsll_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vsll_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsll_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vsll_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vsll_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vsll_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + 
size_t vl) { return __riscv_vsll_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vsll_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vsll_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vsll_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vsll_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vsll_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsll_vx_u64m1_tumu(vbool64_t vm, 
vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vsll_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vsll_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vsll_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsll_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsll_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vsll_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsll_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vsll_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsll_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsll_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsll_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsll_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vsll_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsll_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsll_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsll_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsll_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vsll_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8mf2_mu(vm, vd, vs2, 
rs1, vl); } -vint8m1_t test_vsll_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vsll_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsll_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsll_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vsll_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsll_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vsll_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsll_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsll_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vsll_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsll_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vsll_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsll_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsll_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vsll_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsll_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vsll_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsll_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsll_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vsll_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsll_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsll_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsll_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsll_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vsll_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsll_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsll_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsll_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vsll_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsll_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vsll_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsll_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsll_vx_i16m1_mu(vbool16_t vm, 
vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vsll_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsll_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vsll_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsll_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsll_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vsll_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsll_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vsll_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsll_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsll_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vsll_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsll_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vsll_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsll_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsll_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vsll_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsll_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsll_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsll_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vsll_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsll_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vsll_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsll_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsll_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vsll_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsll_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vsll_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsll_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsll_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vsll_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsll_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, 
vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vsll_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsll_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsll_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vsll_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsll_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vsll_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsll_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsll_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vsll_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsll_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vsll_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsll_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsll_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vsll_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsll_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vsll_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsll_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsll_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vsll_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsll_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vsll_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsll_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsll_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vsll_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsll_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vsll_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsll_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsll_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vsll_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsll_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsll_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vsll_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsll_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, 
size_t rs1, size_t vl) { +vuint8mf8_t test_vsll_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsll_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsll_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsll_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsll_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vsll_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsll_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsll_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsll_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsll_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vsll_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsll_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsll_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsll_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsll_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vsll_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsll_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsll_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsll_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsll_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vsll_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsll_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsll_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsll_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsll_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vsll_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsll_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsll_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsll_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsll_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vsll_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { 
+vuint16mf4_t test_vsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsll_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsll_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsll_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsll_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsll_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsll_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } 
-vuint32mf2_t test_vsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsll_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsll_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsll_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsll_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsll_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsll_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsll_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { 
return __riscv_vsll_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsll_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsll_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsll_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsmul.c b/auto-generated/policy_funcs/llvm-api-tests/vsmul.c index ad9178baa..af70b30d4 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsmul.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsmul.c @@ -5,706 +5,891 @@ #include <riscv_vector.h> -vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsmul_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vsmul_vv_i8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vsmul_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsmul_vx_i8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsmul_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vsmul_vv_i8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vsmul_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsmul_vx_i8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsmul_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vsmul_vv_i8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vsmul_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsmul_vx_i8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vsmul_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vsmul_vv_i8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vsmul_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsmul_vx_i8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t
test_vsmul_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vsmul_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vsmul_vv_i8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vsmul_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsmul_vx_i8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vsmul_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vsmul_vv_i8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vsmul_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsmul_vx_i8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vsmul_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vsmul_vv_i8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vsmul_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsmul_vx_i8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsmul_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vsmul_vv_i16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vsmul_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsmul_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vsmul_vv_i16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vsmul_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vsmul_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vsmul_vv_i16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vsmul_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vsmul_vx_i16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vsmul_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vsmul_vv_i16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vsmul_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return 
__riscv_vsmul_vx_i16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vsmul_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vsmul_vv_i16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vsmul_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vsmul_vx_i16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vsmul_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vsmul_vv_i16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vsmul_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vsmul_vx_i16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsmul_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vsmul_vv_i32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vsmul_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vsmul_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vsmul_vv_i32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vsmul_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsmul_vx_i32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vsmul_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vsmul_vv_i32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vsmul_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsmul_vx_i32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vsmul_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vsmul_vv_i32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vsmul_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsmul_vx_i32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vsmul_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vsmul_vv_i32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t 
rs1, size_t vl) { +vint32m8_t test_vsmul_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsmul_vx_i32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vsmul_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vsmul_vv_i64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vsmul_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vsmul_vx_i64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vsmul_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vsmul_vv_i64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vsmul_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vsmul_vx_i64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vsmul_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vsmul_vv_i64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vsmul_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vsmul_vx_i64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vsmul_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vsmul_vv_i64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vsmul_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vsmul_vx_i64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsmul_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vsmul_vv_i8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vsmul_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsmul_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vsmul_vv_i8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vsmul_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, 
size_t vl) { +vint8mf2_t test_vsmul_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vsmul_vv_i8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vsmul_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vsmul_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vsmul_vv_i8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vsmul_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vsmul_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vsmul_vv_i8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vsmul_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vsmul_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vsmul_vv_i8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vsmul_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vsmul_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vsmul_vv_i8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vsmul_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsmul_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vsmul_vv_i16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vsmul_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsmul_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, 
+ vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vsmul_vv_i16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vsmul_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vsmul_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vsmul_vv_i16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vsmul_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vsmul_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vsmul_vv_i16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vsmul_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vsmul_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vsmul_vv_i16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vsmul_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vsmul_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vsmul_vv_i16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vsmul_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsmul_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vsmul_vv_i32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vsmul_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t 
test_vsmul_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vsmul_vv_i32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vsmul_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vsmul_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vsmul_vv_i32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vsmul_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vsmul_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vsmul_vv_i32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vsmul_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vsmul_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vsmul_vv_i32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vsmul_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vsmul_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vsmul_vv_i64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vsmul_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsmul_vx_i64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vsmul_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vsmul_vv_i64m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vsmul_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsmul_vx_i64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t 
test_vsmul_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vsmul_vv_i64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vsmul_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsmul_vx_i64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vsmul_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vsmul_vv_i64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vsmul_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsmul_vx_i64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsmul_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vsmul_vv_i8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vsmul_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsmul_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vsmul_vv_i8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vsmul_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsmul_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vsmul_vv_i8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vsmul_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vsmul_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vsmul_vv_i8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vsmul_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t 
test_vsmul_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vsmul_vv_i8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vsmul_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vsmul_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vsmul_vv_i8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vsmul_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vsmul_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vsmul_vv_i8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vsmul_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsmul_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vsmul_vv_i16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vsmul_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsmul_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vsmul_vv_i16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vsmul_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vsmul_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vsmul_vv_i16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vsmul_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { 
+vint16m2_t test_vsmul_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vsmul_vv_i16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vsmul_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vsmul_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vsmul_vv_i16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vsmul_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vsmul_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vsmul_vv_i16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vsmul_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsmul_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vsmul_vv_i32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vsmul_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vsmul_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vsmul_vv_i32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vsmul_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vsmul_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vsmul_vv_i32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vsmul_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, 
vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vsmul_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vsmul_vv_i32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vsmul_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vsmul_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vsmul_vv_i32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vsmul_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vsmul_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vsmul_vv_i64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vsmul_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsmul_vx_i64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vsmul_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vsmul_vv_i64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vsmul_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsmul_vx_i64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vsmul_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vsmul_vv_i64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vsmul_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsmul_vx_i64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vsmul_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vsmul_vv_i64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vsmul_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsmul_vx_i64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t 
test_vsmul_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsmul_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vsmul_vv_i8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vsmul_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsmul_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vsmul_vv_i8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vsmul_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsmul_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vsmul_vv_i8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vsmul_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vsmul_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vsmul_vv_i8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vsmul_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vsmul_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vsmul_vv_i8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vsmul_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vsmul_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vsmul_vv_i8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vsmul_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t 
test_vsmul_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vsmul_vv_i8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vsmul_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsmul_vx_i8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsmul_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vsmul_vv_i16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vsmul_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsmul_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vsmul_vv_i16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vsmul_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vsmul_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vsmul_vv_i16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vsmul_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vsmul_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vsmul_vv_i16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vsmul_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vsmul_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vsmul_vv_i16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vsmul_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t 
test_vsmul_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vsmul_vv_i16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vsmul_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsmul_vx_i16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsmul_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vsmul_vv_i32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vsmul_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vsmul_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vsmul_vv_i32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vsmul_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vsmul_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vsmul_vv_i32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vsmul_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vsmul_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vsmul_vv_i32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vsmul_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vsmul_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vsmul_vv_i32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vsmul_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsmul_vx_i32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t 
test_vsmul_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vsmul_vv_i64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vsmul_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsmul_vx_i64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vsmul_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vsmul_vv_i64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vsmul_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsmul_vx_i64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vsmul_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vsmul_vv_i64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vsmul_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsmul_vx_i64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vsmul_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vsmul_vv_i64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vsmul_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vsmul_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsmul_vx_i64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsra.c b/auto-generated/policy_funcs/llvm-api-tests/vsra.c index 8ae20e1c3..eaa2bd0af 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsra.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsra.c @@ -5,706 +5,891 @@ #include <riscv_vector.h> -vint8mf8_t test_vsra_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsra_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vsra_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vsra_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsra_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vsra_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vsra_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1,
size_t vl) { +vint8mf2_t test_vsra_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vsra_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vsra_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vsra_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vsra_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vsra_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vsra_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vsra_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vsra_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vsra_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vsra_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vsra_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vsra_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vsra_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vsra_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsra_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vsra_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vsra_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsra_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vsra_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vsra_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vsra_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vsra_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t 
rs1, size_t vl) { +vint16m1_t test_vsra_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vsra_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vsra_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vsra_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vsra_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vsra_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vsra_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vsra_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vsra_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vsra_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsra_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsra_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vsra_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vsra_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vsra_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vsra_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vsra_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vsra_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vsra_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vsra_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vsra_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vsra_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t 
test_vsra_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vsra_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vsra_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vsra_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vsra_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vsra_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vsra_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vsra_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vsra_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vsra_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vsra_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vsra_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vsra_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vsra_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vsra_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vsra_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vsra_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vsra_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsra_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vsra_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsra_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vsra_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vsra_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsra_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsra_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vsra_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, 
vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsra_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsra_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vsra_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vsra_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsra_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vsra_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vsra_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsra_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vsra_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vsra_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsra_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vsra_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vsra_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsra_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vsra_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsra_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsra_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vsra_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsra_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsra_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsra_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, 
size_t vl) { +vint16mf2_t test_vsra_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsra_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vsra_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsra_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vsra_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vsra_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsra_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vsra_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vsra_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsra_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vsra_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vsra_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsra_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vsra_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsra_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsra_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vsra_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsra_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vsra_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsra_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vsra_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2_tum(vbool16_t vm, vint32m2_t 
vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vsra_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsra_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vsra_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vsra_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsra_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vsra_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vsra_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsra_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vsra_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vsra_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsra_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vsra_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vsra_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsra_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vsra_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vsra_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsra_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vsra_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsra_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vsra_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsra_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsra_vx_i64m8_tum(vbool8_t 
vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vsra_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsra_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsra_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vsra_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vsra_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsra_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsra_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vsra_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsra_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsra_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vsra_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vsra_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsra_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vsra_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vsra_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsra_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vsra_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vsra_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsra_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vsra_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, 
vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vsra_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsra_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vsra_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsra_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsra_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vsra_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsra_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsra_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsra_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vsra_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsra_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vsra_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsra_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vsra_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vsra_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsra_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vsra_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vsra_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsra_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vsra_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vsra_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsra_vv_i16m8_tumu(vm, vd, 
vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vsra_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsra_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsra_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vsra_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsra_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vsra_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsra_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vsra_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vsra_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsra_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vsra_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vsra_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsra_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vsra_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vsra_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsra_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vsra_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vsra_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsra_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vsra_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + 
size_t rs1, size_t vl) { return __riscv_vsra_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vsra_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsra_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vsra_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vsra_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsra_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vsra_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsra_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vsra_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsra_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsra_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vsra_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsra_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsra_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vsra_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsra_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vsra_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsra_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsra_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsra_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsra_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vsra_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsra_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsra_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsra_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsra_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vsra_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsra_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vsra_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t 
vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsra_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsra_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vsra_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsra_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vsra_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsra_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsra_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vsra_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsra_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vsra_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsra_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsra_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vsra_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsra_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vsra_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsra_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsra_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vsra_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsra_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsra_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsra_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsra_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vsra_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsra_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsra_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsra_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsra_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsra_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vsra_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsra_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsra_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vsra_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsra_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsra_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vsra_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return 
__riscv_vsra_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsra_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vsra_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsra_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsra_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vsra_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsra_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vsra_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsra_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsra_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vsra_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsra_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vsra_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsra_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsra_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vsra_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsra_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsra_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsra_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsra_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vsra_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsra_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsra_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vsra_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsra_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsra_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vsra_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsra_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vsra_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsra_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsra_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vsra_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsra_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vsra_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return 
__riscv_vsra_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsra_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vsra_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsra_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vsra_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsra_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsra_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vsra_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsra_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vsra_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsra_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsra_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vsra_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsra_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vsra_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsra_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsra_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vsra_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsra_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vsra_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsra_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsra_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vsra_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsra_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vsra_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsra_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsra_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vsra_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsra_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsrl.c b/auto-generated/policy_funcs/llvm-api-tests/vsrl.c index 36d4eaadb..c6eea5303 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsrl.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsrl.c @@ -5,706 +5,915 @@ #include <riscv_vector.h> -vuint8mf8_t test_vsrl_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsrl_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vsrl_vv_u8mf8_tu(vd,
vs2, vs1, vl); } -vuint8mf8_t test_vsrl_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vsrl_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vsrl_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsrl_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsrl_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vsrl_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vsrl_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vsrl_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsrl_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsrl_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vsrl_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vsrl_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsrl_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vsrl_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vsrl_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsrl_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vsrl_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsrl_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vsrl_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vsrl_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsrl_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vsrl_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vsrl_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vsrl_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsrl_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vsrl_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vsrl_vx_u16mf4_tu(vuint16mf4_t vd, 
vuint16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vsrl_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsrl_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vsrl_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vsrl_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsrl_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsrl_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vsrl_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsrl_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsrl_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vsrl_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsrl_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsrl_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vsrl_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsrl_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsrl_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vsrl_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vsrl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsrl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsrl_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vsrl_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vsrl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsrl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsrl_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vsrl_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { return 
__riscv_vsrl_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsrl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsrl_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsrl_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vsrl_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vsrl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsrl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsrl_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vsrl_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsrl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsrl_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vsrl_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vsrl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsrl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsrl_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vsrl_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsrl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsrl_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vsrl_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vsrl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsrl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsrl_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vsrl_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vsrl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsrl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsrl_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsrl_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vsrl_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vsrl_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vsrl_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t 
vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsrl_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vsrl_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsrl_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vsrl_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsrl_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsrl_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vsrl_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsrl_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vsrl_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsrl_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsrl_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsrl_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vsrl_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsrl_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsrl_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vsrl_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsrl_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsrl_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vsrl_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsrl_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsrl_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vsrl_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsrl_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsrl_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t 
test_vsrl_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vsrl_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsrl_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsrl_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vsrl_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsrl_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsrl_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vsrl_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsrl_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vsrl_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vsrl_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsrl_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsrl_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vsrl_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsrl_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsrl_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vsrl_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsrl_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsrl_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vsrl_vx_u16m8_tum(vbool2_t vm, vuint16m8_t 
vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsrl_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsrl_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vsrl_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsrl_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsrl_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vsrl_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vsrl_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsrl_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsrl_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vsrl_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsrl_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsrl_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsrl_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vsrl_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsrl_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsrl_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vsrl_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsrl_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsrl_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vsrl_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vsrl_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2_tum(vbool32_t vm, 
vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsrl_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vsrl_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsrl_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsrl_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vsrl_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vsrl_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsrl_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsrl_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsrl_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsrl_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vsrl_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsrl_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsrl_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vsrl_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsrl_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vsrl_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsrl_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsrl_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vsrl_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsrl_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vsrl_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsrl_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsrl_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsrl_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vsrl_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsrl_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, 
size_t vl) { return __riscv_vsrl_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vsrl_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsrl_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsrl_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vsrl_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsrl_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsrl_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vsrl_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsrl_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsrl_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsrl_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vsrl_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsrl_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsrl_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vsrl_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsrl_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsrl_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vsrl_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsrl_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vsrl_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { 
+vuint16m1_t test_vsrl_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsrl_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vsrl_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsrl_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vsrl_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vsrl_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsrl_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vsrl_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vsrl_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsrl_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsrl_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vsrl_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsrl_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsrl_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vsrl_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vsrl_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsrl_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsrl_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vsrl_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return 
__riscv_vsrl_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsrl_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsrl_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vsrl_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vsrl_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsrl_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vsrl_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vsrl_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsrl_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsrl_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vsrl_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vsrl_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsrl_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vsrl_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsrl_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsrl_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vsrl_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vsrl_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsrl_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsrl_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vsrl_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsrl_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vsrl_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsrl_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t 
vs1, size_t vl) { +vuint8mf8_t test_vsrl_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vsrl_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsrl_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vsrl_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsrl_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsrl_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsrl_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsrl_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vsrl_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsrl_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsrl_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsrl_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsrl_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vsrl_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsrl_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsrl_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsrl_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsrl_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vsrl_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsrl_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsrl_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsrl_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsrl_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vsrl_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsrl_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsrl_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsrl_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsrl_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vsrl_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsrl_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsrl_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsrl_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsrl_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t 
test_vsrl_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsrl_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsrl_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsrl_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsrl_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vsrl_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsrl_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsrl_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsrl_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vsrl_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsrl_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsrl_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsrl_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsrl_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vsrl_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsrl_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsrl_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsrl_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsrl_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vsrl_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsrl_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsrl_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsrl_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsrl_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vsrl_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsrl_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsrl_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsrl_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsrl_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vsrl_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t 
test_vsrl_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsrl_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsrl_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsrl_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vsrl_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsrl_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsrl_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsrl_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsrl_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vsrl_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsrl_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsrl_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsrl_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsrl_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vsrl_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsrl_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsrl_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsrl_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsrl_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vsrl_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsrl_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsrl_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsrl_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsrl_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vsrl_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsrl_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsrl_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsrl_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsrl_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vsrl_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsrl_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsrl_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, 
size_t vl) { return __riscv_vsrl_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsrl_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vsrl_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsrl_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsrl_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsrl_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsrl_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vsrl_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsrl_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsrl_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsrl_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsrl_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vsrl_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vsrl_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vssra.c b/auto-generated/policy_funcs/llvm-api-tests/vssra.c index 8a636f455..508444f8b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vssra.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vssra.c @@ -5,706 +5,891 @@ #include <riscv_vector.h> -vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vssra_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vssra_vv_i8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vssra_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vssra_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vssra_vv_i8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vssra_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vssra_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vssra_vv_i8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vssra_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vssra_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vssra_vv_i8m1_tu(vd, vs2, vs1, 
__RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vssra_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vssra_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vssra_vv_i8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vssra_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vssra_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vssra_vv_i8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vssra_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vssra_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vssra_vv_i8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vssra_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vssra_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vssra_vv_i16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vssra_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vssra_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vssra_vv_i16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vssra_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vssra_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vssra_vv_i16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vssra_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vssra_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, + 
vuint16m2_t vs1, size_t vl) { return __riscv_vssra_vv_i16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vssra_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vssra_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vssra_vv_i16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vssra_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vssra_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vssra_vv_i16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vssra_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vssra_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vssra_vv_i32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vssra_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vssra_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vssra_vv_i32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vssra_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vssra_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vssra_vv_i32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vssra_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vssra_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vssra_vv_i32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vssra_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t 
test_vssra_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vssra_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vssra_vv_i32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vssra_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vssra_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vssra_vv_i64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vssra_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vssra_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vssra_vv_i64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vssra_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vssra_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vssra_vv_i64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vssra_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vssra_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vssra_vv_i64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vssra_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vssra_vx_i64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vssra_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vssra_vv_i8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vssra_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vssra_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vssra_vv_i8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t 
vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vssra_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vssra_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vssra_vv_i8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vssra_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vssra_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vssra_vv_i8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vssra_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vssra_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vssra_vv_i8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vssra_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vssra_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vssra_vv_i8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vssra_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vssra_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vssra_vv_i8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vssra_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vssra_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vssra_vv_i16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t 
test_vssra_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vssra_vx_i16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vssra_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vssra_vv_i16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vssra_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssra_vx_i16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vssra_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vssra_vv_i16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vssra_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vssra_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vssra_vv_i16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vssra_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vssra_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vssra_vv_i16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vssra_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vssra_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vssra_vv_i16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vssra_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vssra_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vssra_vv_i32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t 
rs1, size_t vl) { +vint32mf2_t test_vssra_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssra_vx_i32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vssra_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vssra_vv_i32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vssra_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vssra_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vssra_vv_i32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vssra_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vssra_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vssra_vv_i32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vssra_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vssra_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vssra_vv_i32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vssra_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vssra_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vssra_vv_i64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vssra_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vssra_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vssra_vv_i64m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, 
size_t vl) { +vint64m2_t test_vssra_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vssra_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vssra_vv_i64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vssra_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vssra_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vssra_vv_i64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vssra_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vssra_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vssra_vv_i8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vssra_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8mf8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vssra_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vssra_vv_i8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vssra_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vssra_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vssra_vv_i8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vssra_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vssra_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vssra_vv_i8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, 
size_t vl) { +vint8m1_t test_vssra_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vssra_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vssra_vv_i8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vssra_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vssra_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vssra_vv_i8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { +vint8m4_t test_vssra_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vssra_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vssra_vv_i8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vssra_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vssra_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vssra_vv_i16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vssra_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vssra_vx_i16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vssra_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vssra_vv_i16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vssra_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssra_vx_i16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vssra_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vssra_vv_i16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, 
size_t vl) { +vint16m1_t test_vssra_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vssra_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vssra_vv_i16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vssra_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vssra_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vssra_vv_i16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t test_vssra_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vssra_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vssra_vv_i16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vssra_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vssra_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vssra_vv_i32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vssra_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssra_vx_i32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vssra_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vssra_vv_i32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vssra_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vssra_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vssra_vv_i32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t vm, 
vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vssra_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vssra_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vssra_vv_i32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vssra_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vssra_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vssra_vv_i32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t test_vssra_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vssra_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vssra_vv_i64m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vssra_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vssra_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vssra_vv_i64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vssra_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vssra_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vssra_vv_i64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vssra_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vssra_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vssra_vv_i64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t 
test_vssra_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vssra_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vssra_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vssra_vv_i8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, size_t rs1, size_t vl) { +vint8mf8_t test_vssra_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vssra_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vssra_vv_i8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, size_t rs1, size_t vl) { +vint8mf4_t test_vssra_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vssra_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vssra_vv_i8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, size_t rs1, size_t vl) { +vint8mf2_t test_vssra_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint8m1_t test_vssra_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vssra_vv_i8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, size_t rs1, size_t vl) { +vint8m1_t test_vssra_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint8m2_t test_vssra_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vssra_vv_i8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, size_t rs1, size_t vl) { +vint8m2_t test_vssra_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint8m4_t test_vssra_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vssra_vv_i8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, size_t rs1, size_t vl) { 
+vint8m4_t test_vssra_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vint8m8_t test_vssra_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vssra_vv_i8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, size_t rs1, size_t vl) { +vint8m8_t test_vssra_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vssra_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vssra_vv_i16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, size_t rs1, size_t vl) { +vint16mf4_t test_vssra_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vssra_vx_i16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vssra_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vssra_vv_i16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, size_t rs1, size_t vl) { +vint16mf2_t test_vssra_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssra_vx_i16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint16m1_t test_vssra_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vssra_vv_i16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, size_t rs1, size_t vl) { +vint16m1_t test_vssra_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint16m2_t test_vssra_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vssra_vv_i16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, size_t rs1, size_t vl) { +vint16m2_t test_vssra_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint16m4_t test_vssra_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vssra_vv_i16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m4_t test_vssra_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, size_t rs1, size_t vl) { +vint16m4_t 
test_vssra_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vint16m8_t test_vssra_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vssra_vv_i16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, size_t rs1, size_t vl) { +vint16m8_t test_vssra_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vssra_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vssra_vv_i32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, size_t rs1, size_t vl) { +vint32mf2_t test_vssra_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssra_vx_i32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint32m1_t test_vssra_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vssra_vv_i32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, size_t rs1, size_t vl) { +vint32m1_t test_vssra_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint32m2_t test_vssra_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vssra_vv_i32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, size_t rs1, size_t vl) { +vint32m2_t test_vssra_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint32m4_t test_vssra_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vssra_vv_i32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, size_t rs1, size_t vl) { +vint32m4_t test_vssra_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vint32m8_t test_vssra_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vssra_vv_i32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint32m8_t test_vssra_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, size_t rs1, size_t vl) { +vint32m8_t 
test_vssra_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vint64m1_t test_vssra_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vssra_vv_i64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, size_t rs1, size_t vl) { +vint64m1_t test_vssra_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vint64m2_t test_vssra_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vssra_vv_i64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, size_t rs1, size_t vl) { +vint64m2_t test_vssra_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vint64m4_t test_vssra_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vssra_vv_i64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, size_t rs1, size_t vl) { +vint64m4_t test_vssra_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vint64m8_t test_vssra_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vssra_vv_i64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vint64m8_t test_vssra_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, size_t rs1, size_t vl) { +vint64m8_t test_vssra_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssra_vx_i64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vssrl.c b/auto-generated/policy_funcs/llvm-api-tests/vssrl.c index e2f0644a3..1dbc1cf43 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vssrl.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vssrl.c @@ -5,706 +5,933 @@ #include <riscv_vector.h> -vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vssrl_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vssrl_vv_u8mf8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vssrl_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u8mf8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vssrl_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return
__riscv_vssrl_vv_u8mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vssrl_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u8mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vssrl_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vssrl_vv_u8mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vssrl_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u8mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vssrl_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vssrl_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u8m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vssrl_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vssrl_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u8m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vssrl_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vssrl_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u8m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vssrl_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vssrl_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u8m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vssrl_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vssrl_vv_u16mf4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vssrl_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16mf4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, 
vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vssrl_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vssrl_vv_u16mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vssrl_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vssrl_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vssrl_vv_u16m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vssrl_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u16m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vssrl_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vssrl_vv_u16m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vssrl_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u16m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vssrl_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vssrl_vv_u16m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vssrl_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u16m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vssrl_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vssrl_vv_u16m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vssrl_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u16m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vssrl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vssrl_vv_u32mf2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vssrl_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32mf2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vssrl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vssrl_vv_u32m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t 
test_vssrl_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u32m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vssrl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vssrl_vv_u32m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vssrl_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u32m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vssrl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vssrl_vv_u32m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vssrl_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u32m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vssrl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vssrl_vv_u32m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vssrl_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u32m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vssrl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vssrl_vv_u64m1_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vssrl_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u64m1_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vssrl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vssrl_vv_u64m2_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vssrl_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u64m2_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vssrl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vssrl_vv_u64m4_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vssrl_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u64m4_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vssrl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t 
vl) { return __riscv_vssrl_vv_u64m8_tu(vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vssrl_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u64m8_tu(vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vssrl_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8mf8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vssrl_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8mf8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vssrl_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vssrl_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vssrl_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vssrl_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vssrl_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vssrl_vv_u8m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vssrl_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vssrl_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vssrl_vv_u8m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vssrl_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vssrl_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return 
__riscv_vssrl_vv_u8m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vssrl_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vssrl_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vssrl_vv_u8m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vssrl_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vssrl_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16mf4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vssrl_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16mf4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vssrl_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vssrl_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vssrl_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vssrl_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vssrl_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vssrl_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vssrl_vv_u16m4_tum(vbool4_t 
vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vssrl_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vssrl_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vssrl_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vssrl_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32mf2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vssrl_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32mf2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vssrl_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vssrl_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vssrl_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vssrl_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vssrl_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vssrl_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, 
vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vssrl_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vssrl_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vssrl_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vssrl_vv_u64m1_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vssrl_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u64m1_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vssrl_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u64m2_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vssrl_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u64m2_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vssrl_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u64m4_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vssrl_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u64m4_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vssrl_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vssrl_vv_u64m8_tum(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vssrl_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u64m8_tum(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vssrl_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8mf8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vssrl_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8mf8_tumu(vm, vd, vs2, rs1, 
__RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vssrl_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vssrl_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vssrl_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vssrl_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vssrl_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vssrl_vv_u8m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vssrl_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vssrl_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vssrl_vv_u8m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vssrl_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vssrl_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vssrl_vv_u8m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vssrl_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vssrl_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vssrl_vv_u8m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vssrl_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8m8_tumu(vm, 
vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vssrl_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16mf4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vssrl_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u16mf4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vssrl_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vssrl_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u16mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vssrl_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vssrl_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vssrl_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vssrl_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vssrl_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vssrl_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vssrl_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { 
+vuint16m8_t test_vssrl_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vssrl_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32mf2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vssrl_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl) { return __riscv_vssrl_vx_u32mf2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vssrl_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32m1_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vssrl_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vssrl_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vssrl_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vssrl_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vssrl_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vssrl_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vssrl_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vssrl_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vssrl_vv_u64m1_tumu(vm, vd, vs2, vs1, 
__RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vssrl_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u64m1_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vssrl_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u64m2_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vssrl_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u64m2_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vssrl_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u64m4_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vssrl_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u64m4_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vssrl_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vssrl_vv_u64m8_tumu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vssrl_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u64m8_tumu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vssrl_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8mf8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { +vuint8mf8_t test_vssrl_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8mf8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vssrl_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { +vuint8mf4_t test_vssrl_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vssrl_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, 
vuint8mf2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u8mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { +vuint8mf2_t test_vssrl_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vssrl_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vssrl_vv_u8m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { +vuint8m1_t test_vssrl_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vssrl_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vssrl_vv_u8m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { +vuint8m2_t test_vssrl_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vssrl_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vssrl_vv_u8m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { +vuint8m4_t test_vssrl_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vssrl_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vssrl_vv_u8m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { +vuint8m8_t test_vssrl_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u8m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vssrl_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16mf4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { +vuint16mf4_t test_vssrl_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16mf4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vssrl_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t 
vl) { return __riscv_vssrl_vv_u16mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { +vuint16mf2_t test_vssrl_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vssrl_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vssrl_vv_u16m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { +vuint16m1_t test_vssrl_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vssrl_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vssrl_vv_u16m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { +vuint16m2_t test_vssrl_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vssrl_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vssrl_vv_u16m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { +vuint16m4_t test_vssrl_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vssrl_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vssrl_vv_u16m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { +vuint16m8_t test_vssrl_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u16m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vssrl_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32mf2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { +vuint32mf2_t test_vssrl_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32mf2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vssrl_vv_u32m1_mu(vbool32_t vm, 
vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { +vuint32m1_t test_vssrl_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vssrl_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u32m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { +vuint32m2_t test_vssrl_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vssrl_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vssrl_vv_u32m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { +vuint32m4_t test_vssrl_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vssrl_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vssrl_vv_u32m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { +vuint32m8_t test_vssrl_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u32m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vssrl_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vssrl_vv_u64m1_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { +vuint64m1_t test_vssrl_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u64m1_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vssrl_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vssrl_vv_u64m2_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { +vuint64m2_t test_vssrl_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u64m2_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t 
test_vssrl_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vssrl_vv_u64m4_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { +vuint64m4_t test_vssrl_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { return __riscv_vssrl_vx_u64m4_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vssrl_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vssrl_vv_u64m8_mu(vm, vd, vs2, vs1, __RISCV_VXRM_RNU, vl); } -vuint64m8_t test_vssrl_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { +vuint64m8_t test_vssrl_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { return __riscv_vssrl_vx_u64m8_mu(vm, vd, vs2, rs1, __RISCV_VXRM_RNU, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vssub.c b/auto-generated/policy_funcs/llvm-api-tests/vssub.c index 2abb1a006..e57019f8c 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vssub.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vssub.c @@ -5,706 +5,891 @@ #include <riscv_vector.h> -vint8mf8_t test_vssub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vssub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vssub_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vssub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vssub_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vssub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vssub_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vssub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vssub_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vssub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vssub_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vssub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vssub_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vssub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vssub_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vssub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vssub_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vssub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vssub_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t
test_vssub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vssub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vssub_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vssub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vssub_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vssub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vssub_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vssub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vssub_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vssub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vssub_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vssub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vssub_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vssub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vssub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vssub_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vssub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vssub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vssub_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vssub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vssub_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vssub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vssub_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vssub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vssub_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vssub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vssub_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vssub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + 
size_t vl) { return __riscv_vssub_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vssub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vssub_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vssub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vssub_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vssub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vssub_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vssub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vssub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vssub_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vssub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vssub_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vssub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vssub_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vssub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vssub_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vssub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vssub_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vssub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vssub_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vssub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vssub_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vssub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vssub_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vssub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vssub_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vssub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vssub_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, 
vint64m2_t vs1, size_t vl) { +vint64m2_t test_vssub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vssub_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vssub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vssub_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vssub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vssub_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vssub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vssub_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vssub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vssub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vssub_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vssub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vssub_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vssub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vssub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vssub_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vssub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vssub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vssub_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vssub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vssub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vssub_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vssub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vssub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vssub_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vssub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return 
__riscv_vssub_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vssub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vssub_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vssub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vssub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vssub_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vssub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vssub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vssub_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vssub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vssub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vssub_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vssub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vssub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vssub_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vssub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vssub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vssub_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vssub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vssub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t 
vs1, size_t vl) { return __riscv_vssub_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vssub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vssub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vssub_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vssub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vssub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vssub_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vssub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vssub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vssub_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vssub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vssub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vssub_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vssub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vssub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vssub_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vssub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vssub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vssub_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t 
test_vssub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vssub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vssub_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vssub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vssub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vssub_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vssub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vssub_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vssub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vssub_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vssub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vssub_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vssub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vssub_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vssub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vssub_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vssub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vssub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vssub_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vssub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vssub_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vssub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vssub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vssub_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vssub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, 
vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vssub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vssub_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vssub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vssub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vssub_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vssub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vssub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vssub_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vssub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vssub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vssub_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vssub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vssub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vssub_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vssub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vssub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vssub_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vssub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vssub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vssub_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4_tumu(vbool64_t vm, 
vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vssub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vssub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vssub_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vssub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vssub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vssub_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vssub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vssub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vssub_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vssub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vssub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vssub_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vssub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vssub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vssub_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vssub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vssub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vssub_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vssub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, 
int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vssub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vssub_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vssub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vssub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vssub_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vssub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vssub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vssub_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vssub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vssub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vssub_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vssub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vssub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vssub_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vssub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vssub_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vssub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vssub_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vssub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vssub_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, 
size_t vl) { +vint64m4_t test_vssub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vssub_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vssub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vssub_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vssub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vssub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vssub_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vssub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vssub_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vssub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vssub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vssub_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vssub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vssub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vssub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vssub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vssub_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vssub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vssub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vssub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vssub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vssub_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vssub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vssub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vssub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vssub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vssub_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vssub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vssub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vssub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vssub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vssub_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vssub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { 
+vint8m2_t test_vssub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vssub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vssub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vssub_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vssub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vssub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vssub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vssub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vssub_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vssub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vssub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vssub_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vssub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vssub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vssub_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vssub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vssub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vssub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vssub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vssub_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vssub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vssub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vssub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vssub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vssub_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vssub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vssub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vssub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vssub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vssub_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vssub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vssub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vssub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { 
+vint16m4_t test_vssub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vssub_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vssub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vssub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vssub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vssub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vssub_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vssub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vssub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vssub_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vssub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vssub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vssub_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vssub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vssub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vssub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vssub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vssub_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vssub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vssub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vssub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vssub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vssub_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vssub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vssub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vssub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vssub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vssub_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vssub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vssub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vssub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vssub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vssub_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vssub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t 
vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vssub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vssub_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vssub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vssub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vssub_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vssub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vssub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vssub_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vssub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vssub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vssub_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vssub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vssub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vssub_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vssub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vssub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vssub_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vssub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vssub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vssub_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vssub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vssub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vssub_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vssub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vssub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vssub_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vssubu.c b/auto-generated/policy_funcs/llvm-api-tests/vssubu.c index ad110849e..398d76664 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vssubu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vssubu.c @@ -5,706 +5,957 @@ #include <riscv_vector.h> -vuint8mf8_t test_vssubu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vssubu_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vssubu_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vssubu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vssubu_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vssubu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vssubu_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vssubu_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vssubu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
+vuint8mf4_t test_vssubu_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vssubu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vssubu_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vssubu_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vssubu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vssubu_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vssubu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vssubu_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vssubu_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vssubu_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vssubu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vssubu_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vssubu_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vssubu_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vssubu_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vssubu_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vssubu_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vssubu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vssubu_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vssubu_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vssubu_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vssubu_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vssubu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vssubu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vssubu_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vssubu_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vssubu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vssubu_vx_u16mf2_tu(vuint16mf2_t 
vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vssubu_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vssubu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vssubu_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vssubu_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vssubu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vssubu_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vssubu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vssubu_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vssubu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vssubu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vssubu_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vssubu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vssubu_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vssubu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vssubu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vssubu_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vssubu_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vssubu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vssubu_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vssubu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vssubu_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vssubu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vssubu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vssubu_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vssubu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vssubu_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vssubu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vssubu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t 
test_vssubu_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vssubu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vssubu_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vssubu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vssubu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vssubu_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vssubu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vssubu_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vssubu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vssubu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vssubu_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vssubu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vssubu_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vssubu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vssubu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vssubu_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vssubu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vssubu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vssubu_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vssubu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vssubu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vssubu_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vssubu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vssubu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vssubu_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vssubu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vssubu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vssubu_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vssubu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vssubu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vssubu_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vssubu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vssubu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vssubu_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vssubu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vssubu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vssubu_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vssubu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, 
vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vssubu_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vssubu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vssubu_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vssubu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vssubu_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vssubu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vssubu_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vssubu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vssubu_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vssubu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vssubu_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vssubu_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vssubu_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vssubu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vssubu_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vssubu_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vssubu_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vssubu_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vssubu_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vssubu_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vssubu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vssubu_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vssubu_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vssubu_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return 
__riscv_vssubu_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vssubu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vssubu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vssubu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vssubu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vssubu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vssubu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vssubu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vssubu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vssubu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vssubu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vssubu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vssubu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vssubu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vssubu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vssubu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vssubu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vssubu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vssubu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vssubu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16m8_tum(vm, vd, vs2, 
rs1, vl); } -vuint32mf2_t test_vssubu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vssubu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vssubu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vssubu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vssubu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vssubu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vssubu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vssubu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vssubu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vssubu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vssubu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vssubu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vssubu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vssubu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vssubu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vssubu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vssubu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vssubu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vssubu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vssubu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vssubu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vssubu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vssubu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vssubu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vssubu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vssubu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vssubu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vssubu_vv_u64m2_tum(vbool32_t 
vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vssubu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vssubu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vssubu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vssubu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vssubu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vssubu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vssubu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vssubu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vssubu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vssubu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vssubu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vssubu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vssubu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vssubu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vssubu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vssubu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vssubu_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vssubu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vssubu_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vssubu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vssubu_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vssubu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vssubu_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vssubu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vssubu_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vssubu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vssubu_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vssubu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { 
+vuint8m1_t test_vssubu_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vssubu_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vssubu_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vssubu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vssubu_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vssubu_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vssubu_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vssubu_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vssubu_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vssubu_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vssubu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vssubu_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vssubu_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vssubu_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vssubu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vssubu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vssubu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vssubu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vssubu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vssubu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vssubu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vssubu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t 
vs1, + size_t vl) { return __riscv_vssubu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vssubu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vssubu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vssubu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vssubu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vssubu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vssubu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vssubu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vssubu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vssubu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vssubu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vssubu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vssubu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vssubu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vssubu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vssubu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vssubu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vssubu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vssubu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vssubu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vssubu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vssubu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vssubu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vssubu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vssubu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vssubu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) 
{ return __riscv_vssubu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vssubu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vssubu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vssubu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vssubu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vssubu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vssubu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vssubu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vssubu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vssubu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vssubu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vssubu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vssubu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vssubu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vssubu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vssubu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vssubu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vssubu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vssubu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vssubu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vssubu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vssubu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vssubu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vssubu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vssubu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vssubu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vssubu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vssubu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vssubu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vssubu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vssubu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vssubu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return 
__riscv_vssubu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vssubu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vssubu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vssubu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vssubu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vssubu_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vssubu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vssubu_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vssubu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vssubu_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vssubu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vssubu_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vssubu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vssubu_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vssubu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vssubu_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vssubu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vssubu_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vssubu_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vssubu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vssubu_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vssubu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vssubu_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vssubu_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vssubu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vssubu_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vssubu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vssubu_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vssubu_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vssubu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t 
test_vssubu_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vssubu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vssubu_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vssubu_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vssubu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vssubu_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vssubu_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vssubu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vssubu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vssubu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vssubu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vssubu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vssubu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vssubu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vssubu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vssubu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vssubu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vssubu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vssubu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vssubu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vssubu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vssubu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vssubu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vssubu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vssubu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vssubu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vssubu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vssubu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vssubu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return 
__riscv_vssubu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vssubu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vssubu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vssubu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vssubu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vssubu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vssubu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vssubu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vssubu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vssubu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vssubu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vssubu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vssubu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vssubu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vssubu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vssubu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vssubu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vssubu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vssubu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vssubu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vssubu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vssubu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vssubu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vssubu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vssubu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vssubu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vssubu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vssubu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vssubu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vssubu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vssubu_vv_u64m1_mu(vbool64_t 
vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vssubu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vssubu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vssubu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vssubu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vssubu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vssubu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vssubu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vssubu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vssubu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vssubu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vssubu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vssubu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vssubu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vssubu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vssubu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vssubu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vssubu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vssubu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vssubu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vssubu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vssubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vssubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vssubu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vsub.c b/auto-generated/policy_funcs/llvm-api-tests/vsub.c index 7a28b0251..62d512bbc 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vsub.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vsub.c @@ -5,1410 +5,1810 @@ #include <riscv_vector.h> -vint8mf8_t test_vsub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsub_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vsub_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vsub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vsub_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsub_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vsub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsub_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vsub_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vsub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vsub_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsub_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vsub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t 
vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsub_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vsub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vsub_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsub_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vsub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vsub_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vsub_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vsub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vsub_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsub_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vsub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vsub_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vsub_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vsub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vsub_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsub_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vsub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vsub_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vsub_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vsub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vsub_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsub_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vsub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vsub_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vsub_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vsub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vsub_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vsub_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vsub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsub_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vsub_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vsub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vsub_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vsub_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vsub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsub_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vsub_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vsub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vsub_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vsub_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vsub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vsub_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vsub_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vsub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t 
vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vsub_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vsub_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vsub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vsub_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vsub_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vsub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vsub_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vsub_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vsub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vsub_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vsub_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vsub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vsub_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vsub_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vsub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vsub_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vsub_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vsub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vsub_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vsub_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsub_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vsub_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vsub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vsub_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsub_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vsub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vsub_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vsub_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vsub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vsub_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsub_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vsub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vsub_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vsub_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vsub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vsub_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsub_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vsub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vsub_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vsub_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vsub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vsub_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsub_vx_i32m4_tu(vd, vs2, rs1, vl); 
} -vint32m8_t test_vsub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vsub_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vsub_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vsub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vsub_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vsub_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vsub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vsub_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vsub_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vsub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vsub_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vsub_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vsub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vsub_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vsub_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vsub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vsub_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vsub_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vsub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vsub_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vsub_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vsub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vsub_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vsub_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vsub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vsub_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vsub_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vsub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vsub_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vsub_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vsub_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsub_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vsub_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vsub_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vsub_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vsub_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vsub_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsub_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsub_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vsub_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vsub_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vsub_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vsub_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsub_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t 
vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsub_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vsub_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vsub_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vsub_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vsub_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsub_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vsub_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vsub_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vsub_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vsub_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vsub_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsub_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vsub_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vsub_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vsub_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vsub_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vsub_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsub_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vsub_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vsub_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vsub_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vsub_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vsub_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsub_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vsub_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vsub_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vsub_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vsub_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vsub_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsub_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vsub_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vsub_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vsub_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vsub_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsub_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vsub_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vsub_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vsub_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vsub_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsub_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsub_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t 
test_vsub_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vsub_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsub_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vsub_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsub_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsub_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vsub_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vsub_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsub_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vsub_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsub_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsub_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vsub_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vsub_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsub_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vsub_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsub_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsub_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vsub_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vsub_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsub_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsub_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vsub_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vsub_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vsub_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsub_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsub_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vsub_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vsub_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsub_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vsub_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsub_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsub_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vsub_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vsub_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsub_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vsub_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsub_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsub_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vsub_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, 
uint32_t rs1, size_t vl) { +vuint32m4_t test_vsub_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsub_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vsub_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsub_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsub_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vsub_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vsub_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsub_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vsub_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsub_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsub_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vsub_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vsub_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vsub_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vsub_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsub_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsub_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vsub_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vsub_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vsub_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vsub_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsub_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsub_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vsub_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vsub_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vsub_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vsub_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsub_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsub_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vsub_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vsub_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vsub_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vsub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsub_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vsub_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vsub_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsub_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vsub_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, 
vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vsub_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsub_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vsub_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vsub_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vsub_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vsub_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vsub_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vsub_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vsub_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vsub_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vsub_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vsub_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vsub_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vsub_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vsub_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vsub_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vsub_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t 
test_vsub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vsub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vsub_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vsub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vsub_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vsub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vsub_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vsub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vsub_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vsub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vsub_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, 
vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vsub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vsub_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vsub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vsub_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vsub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vsub_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vsub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vsub_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsub_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vsub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vsub_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsub_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vsub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vsub_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsub_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsub_vv_i64m8_tum(vbool8_t vm, 
vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vsub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vsub_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsub_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsub_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsub_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vsub_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsub_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vsub_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsub_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsub_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vsub_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsub_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vsub_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsub_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsub_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsub_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vsub_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsub_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsub_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsub_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsub_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vsub_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsub_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsub_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsub_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsub_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vsub_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsub_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsub_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsub_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } 
-vuint8m4_t test_vsub_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vsub_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsub_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsub_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsub_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsub_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vsub_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsub_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsub_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsub_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsub_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vsub_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsub_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsub_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsub_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsub_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vsub_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsub_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsub_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsub_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vsub_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsub_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vsub_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsub_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsub_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsub_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsub_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vsub_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsub_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsub_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsub_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsub_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vsub_vx_u16m4_tum(vbool4_t 
vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsub_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsub_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsub_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vsub_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsub_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsub_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsub_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vsub_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsub_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsub_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsub_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vsub_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsub_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vsub_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsub_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsub_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vsub_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsub_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vsub_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsub_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsub_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsub_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsub_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vsub_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsub_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsub_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsub_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsub_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vsub_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t 
test_vsub_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsub_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vsub_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsub_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vsub_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsub_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsub_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsub_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vsub_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsub_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vsub_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsub_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsub_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsub_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vsub_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsub_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vsub_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsub_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsub_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsub_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsub_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsub_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vsub_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vsub_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsub_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vsub_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vsub_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsub_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vsub_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vsub_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsub_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + 
vint8mf2_t vs1, size_t vl) { return __riscv_vsub_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vsub_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vsub_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vsub_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vsub_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vsub_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vsub_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vsub_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vsub_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vsub_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vsub_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vsub_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vsub_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vsub_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vsub_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, 
int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vsub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vsub_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vsub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vsub_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vsub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vsub_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vsub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vsub_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vsub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vsub_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t 
test_vsub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vsub_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vsub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vsub_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vsub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vsub_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vsub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vsub_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsub_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vsub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vsub_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsub_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vsub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vsub_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsub_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vsub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vsub_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, 
vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsub_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsub_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsub_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vsub_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsub_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vsub_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsub_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsub_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vsub_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsub_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vsub_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsub_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsub_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsub_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vsub_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsub_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsub_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsub_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsub_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vsub_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsub_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsub_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsub_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsub_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vsub_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsub_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsub_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsub_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsub_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vsub_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8m4_tumu(vm, vd, vs2, 
rs1, vl); } -vuint8m8_t test_vsub_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsub_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsub_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsub_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vsub_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsub_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsub_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsub_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsub_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vsub_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsub_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsub_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsub_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsub_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vsub_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vsub_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsub_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsub_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vsub_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsub_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vsub_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsub_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsub_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vsub_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsub_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vsub_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsub_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsub_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vsub_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsub_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vsub_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsub_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, 
vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsub_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vsub_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsub_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vsub_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsub_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vsub_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vsub_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vsub_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsub_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsub_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vsub_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsub_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vsub_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsub_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsub_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vsub_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsub_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vsub_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsub_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsub_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vsub_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsub_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vsub_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsub_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsub_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vsub_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsub_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vsub_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsub_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsub_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + 
vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vsub_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsub_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vsub_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsub_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsub_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsub_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vsub_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vsub_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vsub_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsub_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsub_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsub_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vsub_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsub_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vsub_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsub_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsub_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsub_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vsub_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsub_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vsub_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vsub_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vsub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vsub_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vsub_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vsub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vsub_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vsub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vsub_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vsub_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vsub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vsub_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vsub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vsub_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vsub_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vsub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t 
rs1, size_t vl) { +vint8mf2_t test_vsub_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vsub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vsub_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vsub_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vsub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vsub_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vsub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vsub_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vsub_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vsub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vsub_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vsub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vsub_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vsub_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vsub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vsub_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vsub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vsub_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vsub_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vsub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vsub_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vsub_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vsub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vsub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vsub_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vsub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vsub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vsub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vsub_vv_i16m1_mu(vbool16_t vm, vint16m1_t 
vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vsub_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vsub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vsub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vsub_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vsub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vsub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vsub_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vsub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vsub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vsub_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vsub_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vsub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vsub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vsub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vsub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vsub_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vsub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vsub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vsub_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, 
vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vsub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vsub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vsub_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vsub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vsub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vsub_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vsub_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vsub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vsub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vsub_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsub_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vsub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vsub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vsub_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsub_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vsub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vsub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vsub_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsub_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vsub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vsub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vsub_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vsub_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vsub_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vsub_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + 
vuint8mf8_t vs1, size_t vl) { return __riscv_vsub_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vsub_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vsub_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vsub_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vsub_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vsub_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vsub_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vsub_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vsub_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vsub_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vsub_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vsub_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vsub_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vsub_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vsub_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vsub_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vsub_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vsub_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vsub_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vsub_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vsub_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vsub_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vsub_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vsub_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vsub_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vsub_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vsub_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vsub_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vsub_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vsub_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vsub_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vsub_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vsub_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vsub_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t 
vl) { return __riscv_vsub_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vsub_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vsub_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vsub_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vsub_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vsub_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vsub_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vsub_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vsub_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vsub_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vsub_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vsub_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vsub_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vsub_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vsub_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vsub_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vsub_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vsub_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vsub_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vsub_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vsub_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vsub_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vsub_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vsub_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vsub_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vsub_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vsub_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vsub_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vsub_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vsub_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vsub_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vsub_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t 
vs1, size_t vl) { +vuint32mf2_t test_vsub_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vsub_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vsub_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vsub_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vsub_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vsub_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vsub_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vsub_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vsub_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vsub_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vsub_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vsub_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vsub_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vsub_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vsub_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vsub_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vsub_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vsub_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vsub_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vsub_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vsub_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vsub_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vsub_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vsub_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vsub_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vsub_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vsub_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vsub_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vsub_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vsub_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vsub_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vsub_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vsub_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vsub_vv_u64m2_mu(vm, vd, vs2, vs1, 
vl); } -vuint64m2_t test_vsub_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vsub_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vsub_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vsub_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vsub_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vsub_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vsub_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vsub_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vsub_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vsub_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vsub_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vsub_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vsub_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vsub_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vsub_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwadd.c b/auto-generated/policy_funcs/llvm-api-tests/vwadd.c index 545ccb4c5..1f20817e6 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vwadd.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vwadd.c @@ -5,962 +5,1220 @@ #include <riscv_vector.h> -vint16mf4_t test_vwadd_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwadd_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vwadd_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vwadd_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwadd_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwadd_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vwadd_wv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwadd_wv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vwadd_wv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vwadd_wx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwadd_wx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwadd_wx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vwadd_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwadd_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vwadd_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vwadd_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwadd_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwadd_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vwadd_wv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwadd_wv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vwadd_wv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vwadd_wx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) {
+vint16mf2_t test_vwadd_wx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwadd_wx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vwadd_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwadd_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vwadd_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwadd_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwadd_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwadd_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vwadd_wv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwadd_wv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vwadd_wv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwadd_wx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwadd_wx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwadd_wx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vwadd_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwadd_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vwadd_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vwadd_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwadd_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwadd_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vwadd_wv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwadd_wv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vwadd_wv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vwadd_wx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwadd_wx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwadd_wx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vwadd_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwadd_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vwadd_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vwadd_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwadd_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwadd_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vwadd_wv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwadd_wv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vwadd_wv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vwadd_wx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwadd_wx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwadd_wx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vwadd_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwadd_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vwadd_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vwadd_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwadd_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwadd_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint16m8_t 
test_vwadd_wv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwadd_wv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vwadd_wv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vwadd_wx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwadd_wx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwadd_wx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vwadd_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwadd_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vwadd_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vwadd_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwadd_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vwadd_wv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwadd_wv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vwadd_wv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vwadd_wx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwadd_wx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vwadd_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwadd_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwadd_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwadd_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwadd_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwadd_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vwadd_wv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwadd_wv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwadd_wx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwadd_wx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwadd_wx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vwadd_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwadd_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vwadd_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vwadd_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwadd_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwadd_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vwadd_wv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwadd_wv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vwadd_wv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vwadd_wx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwadd_wx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwadd_wx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t 
test_vwadd_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vwadd_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwadd_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwadd_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_wv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwadd_wv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vwadd_wv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_wx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwadd_wx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwadd_wx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwadd_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vwadd_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwadd_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwadd_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_wv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwadd_wv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vwadd_wv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_wx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwadd_wx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwadd_wx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwadd_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwadd_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwadd_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwadd_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_wv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwadd_wv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwadd_wv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_wx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwadd_wx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwadd_wx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwadd_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vwadd_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwadd_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwadd_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_wv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwadd_wv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vwadd_wv_i64m2_tu(vd, vs2, vs1, vl); } 
-vint64m2_t test_vwadd_wx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwadd_wx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwadd_wx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwadd_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vwadd_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwadd_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwadd_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_wv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwadd_wv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vwadd_wv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_wx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwadd_wx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwadd_wx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwadd_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vwadd_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwadd_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwadd_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_wv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwadd_wv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vwadd_wv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_wx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwadd_wx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwadd_wx_i64m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vwadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwadd_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vwadd_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwadd_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwadd_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwadd_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vwadd_wv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwadd_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwadd_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwadd_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, vint8mf4_t vs1, + size_t 
vl) { return __riscv_vwadd_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwadd_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwadd_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwadd_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vwadd_wv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwadd_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwadd_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwadd_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwadd_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwadd_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwadd_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwadd_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwadd_wv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwadd_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwadd_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwadd_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwadd_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwadd_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwadd_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwadd_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwadd_wv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwadd_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwadd_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwadd_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwadd_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwadd_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwadd_vx_i16m4_tum(vbool4_t vm, 
vint16m4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwadd_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwadd_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwadd_wv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwadd_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwadd_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwadd_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwadd_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwadd_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwadd_wv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwadd_wv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwadd_wv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwadd_wx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwadd_wx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwadd_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwadd_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwadd_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwadd_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwadd_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwadd_wv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwadd_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwadd_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwadd_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwadd_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwadd_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwadd_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, 
vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwadd_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwadd_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwadd_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwadd_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwadd_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwadd_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwadd_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwadd_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwadd_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwadd_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwadd_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwadd_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwadd_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwadd_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwadd_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwadd_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwadd_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwadd_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_wv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwadd_wv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t 
test_vwadd_wx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwadd_wx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwadd_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwadd_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwadd_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_wv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwadd_wv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwadd_wv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_wx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwadd_wx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_wx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwadd_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwadd_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwadd_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_wv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwadd_wv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwadd_wv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_wx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwadd_wx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_wx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwadd_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwadd_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwadd_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_wv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwadd_wv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwadd_wv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_wx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwadd_wx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int32_t rs1, size_t vl) { return 
__riscv_vwadd_wx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwadd_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwadd_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwadd_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_wv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwadd_wv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwadd_wv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_wx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwadd_wx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_wx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwadd_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vwadd_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwadd_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwadd_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwadd_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vwadd_wv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwadd_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwadd_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwadd_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vwadd_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwadd_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwadd_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwadd_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vwadd_wv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwadd_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwadd_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, 
vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwadd_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwadd_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwadd_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwadd_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwadd_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwadd_wv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwadd_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwadd_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwadd_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwadd_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwadd_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwadd_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwadd_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwadd_wv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwadd_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwadd_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwadd_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwadd_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwadd_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwadd_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwadd_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwadd_wv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwadd_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwadd_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwadd_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwadd_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t 
test_vwadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwadd_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwadd_wv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwadd_wv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwadd_wv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwadd_wx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwadd_wx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwadd_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwadd_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwadd_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwadd_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwadd_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwadd_wv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwadd_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwadd_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwadd_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vwadd_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwadd_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwadd_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwadd_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwadd_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwadd_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwadd_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwadd_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwadd_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwadd_vx_i32m2_tumu(vbool16_t 
vm, vint32m2_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwadd_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwadd_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwadd_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwadd_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwadd_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwadd_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwadd_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwadd_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwadd_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwadd_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwadd_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwadd_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_wv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwadd_wv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_wx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwadd_wx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwadd_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vwadd_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwadd_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vwadd_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_wv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, 
vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwadd_wv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwadd_wv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_wx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwadd_wx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_wx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwadd_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwadd_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwadd_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_wv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwadd_wv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwadd_wv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_wx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwadd_wx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_wx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwadd_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwadd_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwadd_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_wv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwadd_wv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwadd_wv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_wx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwadd_wx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_wx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwadd_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwadd_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwadd_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_wv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwadd_wv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint32m4_t vs1, size_t vl) { return 
__riscv_vwadd_wv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_wx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwadd_wx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_wx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwadd_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { return __riscv_vwadd_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwadd_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwadd_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwadd_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vwadd_wv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwadd_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwadd_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwadd_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { return __riscv_vwadd_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwadd_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwadd_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwadd_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vwadd_wv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwadd_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwadd_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwadd_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwadd_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwadd_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwadd_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwadd_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwadd_wv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwadd_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { +vint16m1_t 
test_vwadd_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwadd_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwadd_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwadd_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwadd_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwadd_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwadd_wv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwadd_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwadd_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwadd_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwadd_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwadd_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwadd_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwadd_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwadd_wv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwadd_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwadd_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwadd_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwadd_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwadd_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwadd_wv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwadd_wv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwadd_wv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwadd_wx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwadd_wx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwadd_wx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwadd_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwadd_vv_i32mf2_mu(vbool64_t 
vm, vint32mf2_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwadd_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwadd_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwadd_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwadd_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwadd_wv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwadd_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwadd_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwadd_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwadd_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwadd_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwadd_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwadd_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwadd_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwadd_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwadd_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwadd_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwadd_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwadd_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwadd_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwadd_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwadd_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwadd_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwadd_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t 
rs1, size_t vl) { +vint32m4_t test_vwadd_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwadd_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwadd_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwadd_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwadd_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwadd_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwadd_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwadd_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwadd_wv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwadd_wv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwadd_wv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwadd_wx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwadd_wx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwadd_wx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwadd_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwadd_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwadd_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwadd_wv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwadd_wv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwadd_wv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwadd_wx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwadd_wx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_wx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwadd_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwadd_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwadd_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwadd_wv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t 
vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwadd_wv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwadd_wv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwadd_wx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwadd_wx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_wx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwadd_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwadd_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwadd_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwadd_wv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwadd_wv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwadd_wv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwadd_wx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwadd_wx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_wx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwadd_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwadd_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwadd_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwadd_wv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwadd_wv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwadd_wv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwadd_wx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwadd_wx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwadd_wx_i64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwaddu.c b/auto-generated/policy_funcs/llvm-api-tests/vwaddu.c index f2eeb50af..1c7d29299 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vwaddu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vwaddu.c @@ -5,962 +5,1323 @@ #include <riscv_vector.h> -vuint16mf4_t test_vwaddu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwaddu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vwaddu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwaddu_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_wv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t
vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwaddu_wv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vwaddu_wv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_wx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwaddu_wx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwaddu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vwaddu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwaddu_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_wv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwaddu_wv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vwaddu_wv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_wx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwaddu_wx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwaddu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vwaddu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwaddu_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_wv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwaddu_wv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vwaddu_wv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_wx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwaddu_wx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwaddu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vwaddu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwaddu_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_wv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwaddu_wv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vwaddu_wv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_wx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwaddu_wx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t 
test_vwaddu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwaddu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vwaddu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwaddu_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_wv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwaddu_wv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vwaddu_wv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_wx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwaddu_wx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwaddu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vwaddu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwaddu_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_wv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwaddu_wv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vwaddu_wv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_wx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwaddu_wx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwaddu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vwaddu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwaddu_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwaddu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_wv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwaddu_wv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vwaddu_wv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_wx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwaddu_wx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwaddu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vwaddu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwaddu_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwaddu_vx_u32m1_tu(vd, vs2, 
rs1, vl); } -vuint32m1_t test_vwaddu_wv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwaddu_wv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vwaddu_wv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_wx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwaddu_wx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwaddu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwaddu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwaddu_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwaddu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_wv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwaddu_wv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwaddu_wv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_wx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwaddu_wx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwaddu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vwaddu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwaddu_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwaddu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_wv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwaddu_wv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vwaddu_wv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_wx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwaddu_wx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwaddu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vwaddu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwaddu_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwaddu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_wv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwaddu_wv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vwaddu_wv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_wx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwaddu_wx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32m8_tu(vd, 
vs2, rs1, vl); } -vuint64m1_t test_vwaddu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwaddu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vwaddu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwaddu_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwaddu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_wv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwaddu_wv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vwaddu_wv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwaddu_wx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwaddu_wx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwaddu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwaddu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwaddu_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwaddu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_wv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwaddu_wv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwaddu_wv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_wx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwaddu_wx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwaddu_wx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwaddu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vwaddu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwaddu_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwaddu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_wv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwaddu_wv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vwaddu_wv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_wx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwaddu_wx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwaddu_wx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwaddu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vwaddu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwaddu_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return 
__riscv_vwaddu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_wv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwaddu_wv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vwaddu_wv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_wx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwaddu_wx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwaddu_wx_u64m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwaddu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwaddu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwaddu_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwaddu_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwaddu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwaddu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwaddu_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwaddu_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwaddu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwaddu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t 
test_vwaddu_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwaddu_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwaddu_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwaddu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwaddu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwaddu_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwaddu_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwaddu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwaddu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwaddu_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwaddu_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwaddu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwaddu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_wv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t 
test_vwaddu_wv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_wx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwaddu_wx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwaddu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwaddu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwaddu_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwaddu_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwaddu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwaddu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwaddu_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwaddu_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwaddu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwaddu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t 
test_vwaddu_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwaddu_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwaddu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwaddu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwaddu_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwaddu_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwaddu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwaddu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_wv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwaddu_wv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_wx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwaddu_wx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwaddu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwaddu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_wv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwaddu_wv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, 
vuint32mf2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_wx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwaddu_wx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_wx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwaddu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwaddu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_wv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwaddu_wv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_wx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwaddu_wx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_wx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwaddu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwaddu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_wv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwaddu_wv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_wx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwaddu_wx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_wx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwaddu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwaddu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_wv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwaddu_wv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u64m8_tum(vm, vd, vs2, 
vs1, vl); } -vuint64m8_t test_vwaddu_wx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwaddu_wx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_wx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwaddu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwaddu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwaddu_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwaddu_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwaddu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwaddu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwaddu_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwaddu_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwaddu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwaddu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwaddu_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return 
__riscv_vwaddu_wv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwaddu_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwaddu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwaddu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwaddu_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwaddu_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwaddu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwaddu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwaddu_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwaddu_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwaddu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwaddu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_wv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwaddu_wv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t 
test_vwaddu_wx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwaddu_wx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwaddu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwaddu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwaddu_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwaddu_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwaddu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwaddu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwaddu_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwaddu_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwaddu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwaddu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwaddu_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32m2_tumu(vm, vd, vs2, vs1, vl); 
} -vuint32m2_t test_vwaddu_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwaddu_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwaddu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwaddu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwaddu_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwaddu_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwaddu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwaddu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_wv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwaddu_wv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_wx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwaddu_wx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwaddu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwaddu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_wv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwaddu_wv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t 
test_vwaddu_wx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwaddu_wx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwaddu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwaddu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_wv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwaddu_wv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_wx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwaddu_wx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwaddu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwaddu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_wv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwaddu_wv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_wx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwaddu_wx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwaddu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwaddu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_wv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwaddu_wv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t 
test_vwaddu_wx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwaddu_wx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwaddu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwaddu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwaddu_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwaddu_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwaddu_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwaddu_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwaddu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwaddu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwaddu_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwaddu_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwaddu_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwaddu_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwaddu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwaddu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwaddu_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwaddu_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwaddu_wx_u16m1_mu(vbool16_t vm, 
vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwaddu_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwaddu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vwaddu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwaddu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwaddu_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwaddu_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwaddu_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwaddu_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwaddu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vwaddu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwaddu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwaddu_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwaddu_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwaddu_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwaddu_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwaddu_wx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwaddu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vwaddu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwaddu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwaddu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwaddu_wv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwaddu_wv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwaddu_wx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwaddu_wx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint8_t rs1, size_t vl) { 
return __riscv_vwaddu_wx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwaddu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwaddu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwaddu_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwaddu_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwaddu_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwaddu_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwaddu_wx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwaddu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwaddu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwaddu_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwaddu_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwaddu_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwaddu_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwaddu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwaddu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwaddu_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwaddu_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwaddu_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwaddu_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32m2_mu(vm, vd, vs2, rs1, vl); } 
-vuint32m4_t test_vwaddu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwaddu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwaddu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwaddu_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwaddu_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwaddu_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwaddu_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwaddu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwaddu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwaddu_wv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwaddu_wv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwaddu_wx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwaddu_wx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwaddu_wx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwaddu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwaddu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwaddu_wv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwaddu_wv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwaddu_wx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwaddu_wx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_wx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { 
+vuint64m2_t test_vwaddu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwaddu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwaddu_wv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwaddu_wv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwaddu_wx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwaddu_wx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_wx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwaddu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwaddu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwaddu_wv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwaddu_wv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwaddu_wx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwaddu_wx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_wx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwaddu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwaddu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwaddu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwaddu_wv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwaddu_wv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwaddu_wv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwaddu_wx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwaddu_wx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwaddu_wx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwcvt.c b/auto-generated/policy_funcs/llvm-api-tests/vwcvt.c index 361a28ac9..4bf476f55 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vwcvt.c +++ 
b/auto-generated/policy_funcs/llvm-api-tests/vwcvt.c @@ -5,11 +5,13 @@ #include <riscv_vector.h> -vint16mf4_t test_vwcvt_x_x_v_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwcvt_x_x_v_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i16mf4_tu(vd, vs2, vl); } -vint16mf2_t test_vwcvt_x_x_v_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwcvt_x_x_v_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i16mf2_tu(vd, vs2, vl); } @@ -29,11 +31,13 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16m8_tu(vd, vs2, vl); } -vint32mf2_t test_vwcvt_x_x_v_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwcvt_x_x_v_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i32mf2_tu(vd, vs2, vl); } -vint32m1_t test_vwcvt_x_x_v_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwcvt_x_x_v_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i32m1_tu(vd, vs2, vl); } @@ -49,7 +53,8 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32m8_tu(vd, vs2, vl); } -vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i64m1_tu(vd, vs2, vl); } @@ -65,182 +70,227 @@ vint64m8_t test_vwcvt_x_x_v_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i64m8_tu(vd, vs2, vl); } -vint16mf4_t test_vwcvt_x_x_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwcvt_x_x_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16mf4_tum(vm, vd, vs2, vl); } -vint16mf2_t test_vwcvt_x_x_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwcvt_x_x_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16mf2_tum(vm, vd, vs2, vl); } -vint16m1_t test_vwcvt_x_x_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwcvt_x_x_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16m1_tum(vm, vd, vs2, vl); } -vint16m2_t test_vwcvt_x_x_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vwcvt_x_x_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i16m2_tum(vm, vd, vs2, vl); } -vint16m4_t test_vwcvt_x_x_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vwcvt_x_x_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i16m4_tum(vm, vd, vs2, vl); } -vint16m8_t test_vwcvt_x_x_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vwcvt_x_x_v_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i16m8_tum(vm, vd, vs2, vl); } -vint32mf2_t test_vwcvt_x_x_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwcvt_x_x_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32mf2_tum(vm, vd, vs2, vl); } -vint32m1_t test_vwcvt_x_x_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) { +vint32m1_t
test_vwcvt_x_x_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32m1_tum(vm, vd, vs2, vl); } -vint32m2_t test_vwcvt_x_x_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwcvt_x_x_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32m2_tum(vm, vd, vs2, vl); } -vint32m4_t test_vwcvt_x_x_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwcvt_x_x_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vint16m2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32m4_tum(vm, vd, vs2, vl); } -vint32m8_t test_vwcvt_x_x_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwcvt_x_x_v_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vint16m4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32m8_tum(vm, vd, vs2, vl); } -vint64m1_t test_vwcvt_x_x_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwcvt_x_x_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i64m1_tum(vm, vd, vs2, vl); } -vint64m2_t test_vwcvt_x_x_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vwcvt_x_x_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i64m2_tum(vm, vd, vs2, vl); } -vint64m4_t test_vwcvt_x_x_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vwcvt_x_x_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i64m4_tum(vm, vd, vs2, vl); } -vint64m8_t test_vwcvt_x_x_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vwcvt_x_x_v_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i64m8_tum(vm, vd, vs2, vl); } -vint16mf4_t test_vwcvt_x_x_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwcvt_x_x_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16mf4_tumu(vm, vd, vs2, vl); } -vint16mf2_t test_vwcvt_x_x_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwcvt_x_x_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16mf2_tumu(vm, vd, vs2, vl); } -vint16m1_t test_vwcvt_x_x_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwcvt_x_x_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16m1_tumu(vm, vd, vs2, vl); } -vint16m2_t test_vwcvt_x_x_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vwcvt_x_x_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vint8m1_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16m2_tumu(vm, vd, vs2, vl); } -vint16m4_t test_vwcvt_x_x_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vwcvt_x_x_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, + vint8m2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16m4_tumu(vm, vd, vs2, vl); } -vint16m8_t test_vwcvt_x_x_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vwcvt_x_x_v_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + vint8m4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16m8_tumu(vm, vd, vs2, vl); } -vint32mf2_t test_vwcvt_x_x_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) { 
+vint32mf2_t test_vwcvt_x_x_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32mf2_tumu(vm, vd, vs2, vl); } -vint32m1_t test_vwcvt_x_x_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwcvt_x_x_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32m1_tumu(vm, vd, vs2, vl); } -vint32m2_t test_vwcvt_x_x_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwcvt_x_x_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32m2_tumu(vm, vd, vs2, vl); } -vint32m4_t test_vwcvt_x_x_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwcvt_x_x_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vint16m2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32m4_tumu(vm, vd, vs2, vl); } -vint32m8_t test_vwcvt_x_x_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwcvt_x_x_v_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vint16m4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32m8_tumu(vm, vd, vs2, vl); } -vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i64m1_tumu(vm, vd, vs2, vl); } -vint64m2_t test_vwcvt_x_x_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vwcvt_x_x_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i64m2_tumu(vm, vd, vs2, vl); } -vint64m4_t test_vwcvt_x_x_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vwcvt_x_x_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i64m4_tumu(vm, vd, vs2, vl); } -vint64m8_t test_vwcvt_x_x_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vwcvt_x_x_v_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vint32m4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i64m8_tumu(vm, vd, vs2, vl); } -vint16mf4_t test_vwcvt_x_x_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwcvt_x_x_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16mf4_mu(vm, vd, vs2, vl); } -vint16mf2_t test_vwcvt_x_x_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwcvt_x_x_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16mf2_mu(vm, vd, vs2, vl); } -vint16m1_t test_vwcvt_x_x_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwcvt_x_x_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i16m1_mu(vm, vd, vs2, vl); } -vint16m2_t test_vwcvt_x_x_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vwcvt_x_x_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i16m2_mu(vm, vd, vs2, vl); } -vint16m4_t test_vwcvt_x_x_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vwcvt_x_x_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i16m4_mu(vm, vd, vs2, vl); } -vint16m8_t test_vwcvt_x_x_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, size_t 
vl) { +vint16m8_t test_vwcvt_x_x_v_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i16m8_mu(vm, vd, vs2, vl); } -vint32mf2_t test_vwcvt_x_x_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwcvt_x_x_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32mf2_mu(vm, vd, vs2, vl); } -vint32m1_t test_vwcvt_x_x_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwcvt_x_x_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32m1_mu(vm, vd, vs2, vl); } -vint32m2_t test_vwcvt_x_x_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwcvt_x_x_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vint16m1_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i32m2_mu(vm, vd, vs2, vl); } -vint32m4_t test_vwcvt_x_x_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwcvt_x_x_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i32m4_mu(vm, vd, vs2, vl); } -vint32m8_t test_vwcvt_x_x_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwcvt_x_x_v_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i32m8_mu(vm, vd, vs2, vl); } -vint64m1_t test_vwcvt_x_x_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwcvt_x_x_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i64m1_mu(vm, vd, vs2, vl); } -vint64m2_t test_vwcvt_x_x_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vwcvt_x_x_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vint32m1_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i64m2_mu(vm, vd, vs2, vl); } -vint64m4_t test_vwcvt_x_x_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vwcvt_x_x_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vint32m2_t vs2, size_t vl) { return __riscv_vwcvt_x_x_v_i64m4_mu(vm, vd, vs2, vl); } -vint64m8_t test_vwcvt_x_x_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vwcvt_x_x_v_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + size_t vl) { return __riscv_vwcvt_x_x_v_i64m8_mu(vm, vd, vs2, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwcvtu.c b/auto-generated/policy_funcs/llvm-api-tests/vwcvtu.c index f00414526..a828bf41b 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vwcvtu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vwcvtu.c @@ -5,242 +5,302 @@ #include <riscv_vector.h> -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vwcvtu_x_x_v_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { +vuint16m1_t test_vwcvtu_x_x_v_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u16m1_tu(vd, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { +vuint16m2_t test_vwcvtu_x_x_v_u16m2_tu(vuint16m2_t vd,
vuint8m1_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u16m2_tu(vd, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vwcvtu_x_x_v_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u16m4_tu(vd, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vwcvtu_x_x_v_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u16m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vwcvtu_x_x_v_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vwcvtu_x_x_v_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vwcvtu_x_x_v_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vwcvtu_x_x_v_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vwcvtu_x_x_v_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vwcvtu_x_x_v_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vwcvtu_x_x_v_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vwcvtu_x_x_v_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vwcvtu_x_x_v_u64m8_tu(vd, vs2, vl); } -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vwcvtu_x_x_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { +vuint16m1_t test_vwcvtu_x_x_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { +vuint16m2_t test_vwcvtu_x_x_v_u16m2_tum(vbool8_t vm, vuint16m2_t 
vd, + vuint8m1_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vwcvtu_x_x_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vwcvtu_x_x_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vwcvtu_x_x_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vwcvtu_x_x_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vwcvtu_x_x_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vwcvtu_x_x_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vwcvtu_x_x_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vwcvtu_x_x_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vwcvtu_x_x_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vwcvtu_x_x_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u64m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t 
test_vwcvtu_x_x_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { +vuint16m1_t test_vwcvtu_x_x_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { +vuint16m2_t test_vwcvtu_x_x_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vwcvtu_x_x_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16m4_tumu(vm, vd, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vwcvtu_x_x_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16m8_tumu(vm, vd, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32mf2_tumu(vm, vd, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vwcvtu_x_x_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32m1_tumu(vm, vd, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vwcvtu_x_x_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32m2_tumu(vm, vd, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vwcvtu_x_x_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32m4_tumu(vm, vd, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vwcvtu_x_x_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32m8_tumu(vm, vd, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u64m1_tumu(vm, vd, vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vwcvtu_x_x_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u64m2_tumu(vm, vd, vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vwcvtu_x_x_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u64m4_tumu(vm, vd, vs2, vl); } -vuint64m8_t test_vwcvtu_x_x_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vwcvtu_x_x_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u64m8_tumu(vm, vd, vs2, vl); } -vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t 
test_vwcvtu_x_x_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16mf4_mu(vm, vd, vs2, vl); } -vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16mf2_mu(vm, vd, vs2, vl); } -vuint16m1_t test_vwcvtu_x_x_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { +vuint16m1_t test_vwcvtu_x_x_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16m1_mu(vm, vd, vs2, vl); } -vuint16m2_t test_vwcvtu_x_x_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { +vuint16m2_t test_vwcvtu_x_x_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16m2_mu(vm, vd, vs2, vl); } -vuint16m4_t test_vwcvtu_x_x_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vwcvtu_x_x_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16m4_mu(vm, vd, vs2, vl); } -vuint16m8_t test_vwcvtu_x_x_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vwcvtu_x_x_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u16m8_mu(vm, vd, vs2, vl); } -vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32mf2_mu(vm, vd, vs2, vl); } -vuint32m1_t test_vwcvtu_x_x_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vwcvtu_x_x_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32m1_mu(vm, vd, vs2, vl); } -vuint32m2_t test_vwcvtu_x_x_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vwcvtu_x_x_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32m2_mu(vm, vd, vs2, vl); } -vuint32m4_t test_vwcvtu_x_x_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vwcvtu_x_x_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32m4_mu(vm, vd, vs2, vl); } -vuint32m8_t test_vwcvtu_x_x_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vwcvtu_x_x_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u32m8_mu(vm, vd, vs2, vl); } -vuint64m1_t test_vwcvtu_x_x_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t test_vwcvtu_x_x_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u64m1_mu(vm, vd, vs2, vl); } -vuint64m2_t test_vwcvtu_x_x_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vwcvtu_x_x_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u64m2_mu(vm, vd, vs2, vl); } -vuint64m4_t test_vwcvtu_x_x_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vwcvtu_x_x_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vwcvtu_x_x_v_u64m4_mu(vm, vd, vs2, vl); } -vuint64m8_t 
test_vwcvtu_x_x_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
+vuint64m8_t test_vwcvtu_x_x_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd,
+                                       vuint32m4_t vs2, size_t vl) {
   return __riscv_vwcvtu_x_x_v_u64m8_mu(vm, vd, vs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwmacc.c b/auto-generated/policy_funcs/llvm-api-tests/vwmacc.c
index 891185d56..9e87f026a 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vwmacc.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vwmacc.c
@@ -1,487 +1,625 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-vint16mf4_t test_vwmacc_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) {
+vint16mf4_t test_vwmacc_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1,
+                                     vint8mf8_t vs2, size_t vl) {
   return __riscv_vwmacc_vv_i16mf4_tu(vd, vs1, vs2, vl);
 }
 
-vint16mf4_t test_vwmacc_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) {
+vint16mf4_t test_vwmacc_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2,
+                                     size_t vl) {
   return __riscv_vwmacc_vx_i16mf4_tu(vd, rs1, vs2, vl);
 }
 
-vint16mf2_t test_vwmacc_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) {
+vint16mf2_t test_vwmacc_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1,
+                                     vint8mf4_t vs2, size_t vl) {
   return __riscv_vwmacc_vv_i16mf2_tu(vd, vs1, vs2, vl);
 }
 
-vint16mf2_t test_vwmacc_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) {
+vint16mf2_t test_vwmacc_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2,
+                                     size_t vl) {
   return __riscv_vwmacc_vx_i16mf2_tu(vd, rs1, vs2, vl);
 }
 
-vint16m1_t test_vwmacc_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) {
+vint16m1_t test_vwmacc_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1,
+                                   vint8mf2_t vs2, size_t vl) {
   return __riscv_vwmacc_vv_i16m1_tu(vd, vs1, vs2, vl);
 }
 
-vint16m1_t test_vwmacc_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) {
+vint16m1_t test_vwmacc_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vint8mf2_t vs2,
+                                   size_t vl) {
   return __riscv_vwmacc_vx_i16m1_tu(vd, rs1, vs2, vl);
 }
 
-vint16m2_t test_vwmacc_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) {
+vint16m2_t test_vwmacc_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2,
+                                   size_t vl) {
   return __riscv_vwmacc_vv_i16m2_tu(vd, vs1, vs2, vl);
 }
 
-vint16m2_t test_vwmacc_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) {
+vint16m2_t test_vwmacc_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vint8m1_t vs2,
+                                   size_t vl) {
   return __riscv_vwmacc_vx_i16m2_tu(vd, rs1, vs2, vl);
 }
 
-vint16m4_t test_vwmacc_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) {
+vint16m4_t test_vwmacc_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2,
+                                   size_t vl) {
   return __riscv_vwmacc_vv_i16m4_tu(vd, vs1, vs2, vl);
 }
 
-vint16m4_t test_vwmacc_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) {
+vint16m4_t test_vwmacc_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vint8m2_t vs2,
+                                   size_t vl) {
   return __riscv_vwmacc_vx_i16m4_tu(vd, rs1, vs2, vl);
 }
 
-vint16m8_t test_vwmacc_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) {
+vint16m8_t test_vwmacc_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2,
+                                   size_t vl) {
   return
__riscv_vwmacc_vv_i16m8_tu(vd, vs1, vs2, vl); } -vint16m8_t test_vwmacc_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmacc_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vint8m4_t vs2, + size_t vl) { return __riscv_vwmacc_vx_i16m8_tu(vd, rs1, vs2, vl); } -vint32mf2_t test_vwmacc_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmacc_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32mf2_tu(vd, vs1, vs2, vl); } -vint32mf2_t test_vwmacc_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmacc_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32mf2_tu(vd, rs1, vs2, vl); } -vint32m1_t test_vwmacc_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmacc_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32m1_tu(vd, vs1, vs2, vl); } -vint32m1_t test_vwmacc_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmacc_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vwmacc_vx_i32m1_tu(vd, rs1, vs2, vl); } -vint32m2_t test_vwmacc_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmacc_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, + vint16m1_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32m2_tu(vd, vs1, vs2, vl); } -vint32m2_t test_vwmacc_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmacc_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vint16m1_t vs2, + size_t vl) { return __riscv_vwmacc_vx_i32m2_tu(vd, rs1, vs2, vl); } -vint32m4_t test_vwmacc_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmacc_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32m4_tu(vd, vs1, vs2, vl); } -vint32m4_t test_vwmacc_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmacc_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vint16m2_t vs2, + size_t vl) { return __riscv_vwmacc_vx_i32m4_tu(vd, rs1, vs2, vl); } -vint32m8_t test_vwmacc_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmacc_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32m8_tu(vd, vs1, vs2, vl); } -vint32m8_t test_vwmacc_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmacc_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vint16m4_t vs2, + size_t vl) { return __riscv_vwmacc_vx_i32m8_tu(vd, rs1, vs2, vl); } -vint64m1_t test_vwmacc_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmacc_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i64m1_tu(vd, vs1, vs2, vl); } -vint64m1_t test_vwmacc_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmacc_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vwmacc_vx_i64m1_tu(vd, rs1, vs2, vl); } -vint64m2_t test_vwmacc_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmacc_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, + vint32m1_t vs2, size_t vl) { return __riscv_vwmacc_vv_i64m2_tu(vd, vs1, vs2, vl); } -vint64m2_t 
test_vwmacc_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmacc_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vint32m1_t vs2, + size_t vl) { return __riscv_vwmacc_vx_i64m2_tu(vd, rs1, vs2, vl); } -vint64m4_t test_vwmacc_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmacc_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, + vint32m2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i64m4_tu(vd, vs1, vs2, vl); } -vint64m4_t test_vwmacc_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmacc_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vint32m2_t vs2, + size_t vl) { return __riscv_vwmacc_vx_i64m4_tu(vd, rs1, vs2, vl); } -vint64m8_t test_vwmacc_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmacc_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vwmacc_vv_i64m8_tu(vd, vs1, vs2, vl); } -vint64m8_t test_vwmacc_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmacc_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vint32m4_t vs2, + size_t vl) { return __riscv_vwmacc_vx_i64m8_tu(vd, rs1, vs2, vl); } -vint16mf4_t test_vwmacc_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmacc_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i16mf4_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmacc_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmacc_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16mf4_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmacc_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmacc_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs1, vint8mf4_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i16mf2_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmacc_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmacc_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int8_t rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16mf2_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmacc_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmacc_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i16m1_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmacc_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmacc_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16m1_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmacc_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmacc_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vwmacc_vv_i16m2_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmacc_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmacc_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16m2_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmacc_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t 
vs1, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmacc_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i16m4_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmacc_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmacc_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16m4_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmacc_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmacc_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vwmacc_vv_i16m8_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmacc_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmacc_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16m8_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmacc_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmacc_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i32mf2_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmacc_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmacc_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32mf2_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmacc_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmacc_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i32m1_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmacc_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmacc_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32m1_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmacc_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmacc_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, + vint16m1_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32m2_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmacc_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmacc_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32m2_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmacc_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmacc_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32m4_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmacc_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmacc_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32m4_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmacc_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmacc_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return 
__riscv_vwmacc_vv_i32m8_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmacc_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmacc_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32m8_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmacc_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmacc_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i64m1_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmacc_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmacc_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i64m1_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmacc_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmacc_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, + vint32m1_t vs2, size_t vl) { return __riscv_vwmacc_vv_i64m2_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmacc_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmacc_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vwmacc_vx_i64m2_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmacc_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmacc_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, + vint32m2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i64m4_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmacc_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmacc_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i64m4_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmacc_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmacc_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vwmacc_vv_i64m8_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmacc_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmacc_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i64m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmacc_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmacc_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i16mf4_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmacc_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmacc_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmacc_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmacc_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs1, vint8mf4_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i16mf2_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmacc_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vint8mf4_t 
vs2, size_t vl) { +vint16mf2_t test_vwmacc_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmacc_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmacc_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs1, vint8mf2_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i16m1_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmacc_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmacc_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16m1_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmacc_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmacc_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vwmacc_vv_i16m2_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmacc_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmacc_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16m2_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmacc_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmacc_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i16m4_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmacc_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmacc_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16m4_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmacc_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmacc_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vwmacc_vv_i16m8_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmacc_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmacc_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16m8_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmacc_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmacc_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i32mf2_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmacc_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmacc_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + int16_t rs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vwmacc_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmacc_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmacc_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i32m1_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmacc_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmacc_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return 
__riscv_vwmacc_vx_i32m1_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmacc_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmacc_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint16m1_t vs1, vint16m1_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i32m2_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmacc_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmacc_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32m2_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmacc_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmacc_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32m4_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmacc_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmacc_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32m4_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmacc_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmacc_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32m8_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmacc_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmacc_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32m8_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmacc_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmacc_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i64m1_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmacc_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmacc_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i64m1_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmacc_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmacc_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint32m1_t vs1, vint32m1_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i64m2_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmacc_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmacc_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vwmacc_vx_i64m2_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmacc_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmacc_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint32m2_t vs1, vint32m2_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i64m4_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmacc_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmacc_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i64m4_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmacc_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, 
vint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmacc_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, + vint32m4_t vs2, size_t vl) { return __riscv_vwmacc_vv_i64m8_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmacc_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmacc_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i64m8_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmacc_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmacc_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i16mf4_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmacc_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmacc_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, + vint8mf8_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmacc_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmacc_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs1, vint8mf4_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i16mf2_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmacc_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmacc_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, + vint8mf4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmacc_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmacc_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i16m1_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmacc_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int8_t rs1, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmacc_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmacc_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmacc_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, + vint8m1_t vs2, size_t vl) { return __riscv_vwmacc_vv_i16m2_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmacc_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int8_t rs1, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmacc_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16m2_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmacc_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmacc_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, + vint8m2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i16m4_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmacc_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int8_t rs1, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmacc_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16m4_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmacc_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmacc_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, + vint8m4_t vs2, size_t vl) { return __riscv_vwmacc_vv_i16m8_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t 
test_vwmacc_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int8_t rs1, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmacc_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i16m8_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmacc_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmacc_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vwmacc_vv_i32mf2_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmacc_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmacc_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, + vint16mf4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32mf2_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmacc_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmacc_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32m1_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmacc_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int16_t rs1, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmacc_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32m1_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmacc_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmacc_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, + vint16m1_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32m2_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmacc_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int16_t rs1, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmacc_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32m2_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmacc_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmacc_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, + vint16m2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32m4_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmacc_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int16_t rs1, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmacc_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32m4_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmacc_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmacc_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, + vint16m4_t vs2, size_t vl) { return __riscv_vwmacc_vv_i32m8_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmacc_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int16_t rs1, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmacc_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vwmacc_vx_i32m8_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmacc_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmacc_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vwmacc_vv_i64m1_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmacc_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int32_t rs1, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmacc_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int32_t rs1, + vint32mf2_t vs2, size_t 
vl) {
   return __riscv_vwmacc_vx_i64m1_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint64m2_t test_vwmacc_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vint32m1_t vs2, size_t vl) {
+vint64m2_t test_vwmacc_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1,
+                                   vint32m1_t vs2, size_t vl) {
   return __riscv_vwmacc_vv_i64m2_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint64m2_t test_vwmacc_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int32_t rs1, vint32m1_t vs2, size_t vl) {
+vint64m2_t test_vwmacc_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int32_t rs1,
+                                   vint32m1_t vs2, size_t vl) {
   return __riscv_vwmacc_vx_i64m2_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint64m4_t test_vwmacc_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vint32m2_t vs2, size_t vl) {
+vint64m4_t test_vwmacc_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1,
+                                   vint32m2_t vs2, size_t vl) {
   return __riscv_vwmacc_vv_i64m4_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint64m4_t test_vwmacc_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int32_t rs1, vint32m2_t vs2, size_t vl) {
+vint64m4_t test_vwmacc_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int32_t rs1,
+                                   vint32m2_t vs2, size_t vl) {
   return __riscv_vwmacc_vx_i64m4_mu(vm, vd, rs1, vs2, vl);
 }
 
-vint64m8_t test_vwmacc_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vint32m4_t vs2, size_t vl) {
+vint64m8_t test_vwmacc_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1,
+                                   vint32m4_t vs2, size_t vl) {
   return __riscv_vwmacc_vv_i64m8_mu(vm, vd, vs1, vs2, vl);
 }
 
-vint64m8_t test_vwmacc_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int32_t rs1, vint32m4_t vs2, size_t vl) {
+vint64m8_t test_vwmacc_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int32_t rs1,
+                                   vint32m4_t vs2, size_t vl) {
   return __riscv_vwmacc_vx_i64m8_mu(vm, vd, rs1, vs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwmaccsu.c b/auto-generated/policy_funcs/llvm-api-tests/vwmaccsu.c
index e497c86c7..94b6af522 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vwmaccsu.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vwmaccsu.c
@@ -1,487 +1,650 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
-vint16mf4_t test_vwmaccsu_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+vint16mf4_t test_vwmaccsu_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs1,
+                                       vuint8mf8_t vs2, size_t vl) {
   return __riscv_vwmaccsu_vv_i16mf4_tu(vd, vs1, vs2, vl);
 }
 
-vint16mf4_t test_vwmaccsu_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) {
+vint16mf4_t test_vwmaccsu_vx_i16mf4_tu(vint16mf4_t vd, int8_t rs1,
+                                       vuint8mf8_t vs2, size_t vl) {
   return __riscv_vwmaccsu_vx_i16mf4_tu(vd, rs1, vs2, vl);
 }
 
-vint16mf2_t test_vwmaccsu_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+vint16mf2_t test_vwmaccsu_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs1,
+                                       vuint8mf4_t vs2, size_t vl) {
   return __riscv_vwmaccsu_vv_i16mf2_tu(vd, vs1, vs2, vl);
 }
 
-vint16mf2_t test_vwmaccsu_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) {
+vint16mf2_t test_vwmaccsu_vx_i16mf2_tu(vint16mf2_t vd, int8_t rs1,
+                                       vuint8mf4_t vs2, size_t vl) {
   return __riscv_vwmaccsu_vx_i16mf2_tu(vd, rs1, vs2, vl);
 }
 
-vint16m1_t test_vwmaccsu_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+vint16m1_t
test_vwmaccsu_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i16m1_tu(vd, vs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmaccsu_vx_i16m1_tu(vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vx_i16m1_tu(vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmaccsu_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i16m2_tu(vd, vs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmaccsu_vx_i16m2_tu(vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, + size_t vl) { return __riscv_vwmaccsu_vx_i16m2_tu(vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmaccsu_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i16m4_tu(vd, vs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmaccsu_vx_i16m4_tu(vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vx_i16m4_tu(vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmaccsu_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i16m8_tu(vd, vs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmaccsu_vx_i16m8_tu(vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vx_i16m8_tu(vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmaccsu_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs1, + vuint16mf4_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i32mf2_tu(vd, vs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmaccsu_vx_i32mf2_tu(vint32mf2_t vd, int16_t rs1, + vuint16mf4_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32mf2_tu(vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmaccsu_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i32m1_tu(vd, vs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmaccsu_vx_i32m1_tu(vint32m1_t vd, int16_t rs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m1_tu(vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmaccsu_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i32m2_tu(vd, vs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmaccsu_vx_i32m2_tu(vint32m2_t vd, int16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m2_tu(vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { 
+vint32m4_t test_vwmaccsu_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i32m4_tu(vd, vs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmaccsu_vx_i32m4_tu(vint32m4_t vd, int16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m4_tu(vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmaccsu_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i32m8_tu(vd, vs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmaccsu_vx_i32m8_tu(vint32m8_t vd, int16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m8_tu(vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmaccsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i64m1_tu(vd, vs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmaccsu_vx_i64m1_tu(vint64m1_t vd, int32_t rs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m1_tu(vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmaccsu_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i64m2_tu(vd, vs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmaccsu_vx_i64m2_tu(vint64m2_t vd, int32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m2_tu(vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmaccsu_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i64m4_tu(vd, vs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmaccsu_vx_i64m4_tu(vint64m4_t vd, int32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m4_tu(vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmaccsu_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i64m8_tu(vd, vs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmaccsu_vx_i64m8_tu(vint64m8_t vd, int32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m8_tu(vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmaccsu_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i16mf4_tum(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmaccsu_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + int8_t rs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccsu_vx_i16mf4_tum(vm, vd, rs1, vs2, vl); } 
-vint16mf2_t test_vwmaccsu_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmaccsu_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i16mf2_tum(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmaccsu_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + int8_t rs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vx_i16mf2_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmaccsu_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i16m1_tum(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmaccsu_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, int8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16m1_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmaccsu_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i16m2_tum(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmaccsu_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, int8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16m2_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmaccsu_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i16m4_tum(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmaccsu_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, int8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16m4_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmaccsu_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i16m8_tum(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmaccsu_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, int8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16m8_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmaccsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32mf2_tum(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmaccsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + int16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vx_i32mf2_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t 
vs1, vuint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmaccsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32m1_tum(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmaccsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, int16_t rs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m1_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmaccsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32m2_tum(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmaccsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, int16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m2_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmaccsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, + vint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32m4_tum(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmaccsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, int16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m4_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmaccsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, + vint16m4_t vs1, vuint16m4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32m8_tum(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmaccsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, int16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m8_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmaccsu_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i64m1_tum(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmaccsu_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, int32_t rs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m1_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmaccsu_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i64m2_tum(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmaccsu_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, int32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m2_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vint64m4_t 
test_vwmaccsu_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i64m4_tum(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmaccsu_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, int32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m4_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmaccsu_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, + vint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i64m8_tum(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmaccsu_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, int32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmaccsu_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i16mf4_tumu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmaccsu_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + int8_t rs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccsu_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmaccsu_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i16mf2_tumu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmaccsu_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + int8_t rs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmaccsu_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i16m1_tumu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmaccsu_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, int8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16m1_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmaccsu_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, + vint8m1_t vs1, vuint8m1_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i16m2_tumu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmaccsu_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, int8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16m2_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmaccsu_vv_i16m4_tumu(vbool4_t vm, 
vint16m4_t vd, + vint8m2_t vs1, vuint8m2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i16m4_tumu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmaccsu_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, int8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16m4_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmaccsu_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, + vint8m4_t vs1, vuint8m4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i16m8_tumu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmaccsu_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, int8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16m8_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmaccsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32mf2_tumu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmaccsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + int16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmaccsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32m1_tumu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmaccsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, int16_t rs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m1_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmaccsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32m2_tumu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmaccsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, int16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m2_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmaccsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32m4_tumu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmaccsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, int16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m4_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmaccsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vint16m4_t vs1, 
vuint16m4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32m8_tumu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmaccsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, int16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m8_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmaccsu_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i64m1_tumu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmaccsu_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, int32_t rs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m1_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmaccsu_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i64m2_tumu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmaccsu_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, int32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m2_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmaccsu_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i64m4_tumu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmaccsu_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, int32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m4_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmaccsu_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i64m8_tumu(vm, vd, vs1, vs2, vl); } -vint64m8_t test_vwmaccsu_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmaccsu_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, int32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m8_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmaccsu_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i16mf4_mu(vm, vd, vs1, vs2, vl); } -vint16mf4_t test_vwmaccsu_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, vuint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmaccsu_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, int8_t rs1, + vuint8mf8_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmaccsu_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { 
return __riscv_vwmaccsu_vv_i16mf2_mu(vm, vd, vs1, vs2, vl); } -vint16mf2_t test_vwmaccsu_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, vuint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmaccsu_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, int8_t rs1, + vuint8mf4_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmaccsu_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i16m1_mu(vm, vd, vs1, vs2, vl); } -vint16m1_t test_vwmaccsu_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int8_t rs1, vuint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmaccsu_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, int8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmaccsu_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i16m2_mu(vm, vd, vs1, vs2, vl); } -vint16m2_t test_vwmaccsu_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int8_t rs1, vuint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmaccsu_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, int8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16m2_mu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmaccsu_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i16m4_mu(vm, vd, vs1, vs2, vl); } -vint16m4_t test_vwmaccsu_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int8_t rs1, vuint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmaccsu_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, int8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16m4_mu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmaccsu_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i16m8_mu(vm, vd, vs1, vs2, vl); } -vint16m8_t test_vwmaccsu_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int8_t rs1, vuint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmaccsu_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, int8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i16m8_mu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmaccsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32mf2_mu(vm, vd, vs1, vs2, vl); } -vint32mf2_t test_vwmaccsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, int16_t rs1, vuint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmaccsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + int16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccsu_vx_i32mf2_mu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmaccsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32m1_mu(vm, vd, vs1, vs2, vl); } -vint32m1_t test_vwmaccsu_vx_i32m1_mu(vbool32_t vm, 
vint32m1_t vd, int16_t rs1, vuint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmaccsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, int16_t rs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m1_mu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmaccsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, + vint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i32m2_mu(vm, vd, vs1, vs2, vl); } -vint32m2_t test_vwmaccsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int16_t rs1, vuint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmaccsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, int16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m2_mu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmaccsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i32m4_mu(vm, vd, vs1, vs2, vl); } -vint32m4_t test_vwmaccsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int16_t rs1, vuint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmaccsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, int16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m4_mu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmaccsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vv_i32m8_mu(vm, vd, vs1, vs2, vl); } -vint32m8_t test_vwmaccsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int16_t rs1, vuint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmaccsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, int16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i32m8_mu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmaccsu_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i64m1_mu(vm, vd, vs1, vs2, vl); } -vint64m1_t test_vwmaccsu_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int32_t rs1, vuint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmaccsu_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, int32_t rs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m1_mu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmaccsu_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, + vint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i64m2_mu(vm, vd, vs1, vs2, vl); } -vint64m2_t test_vwmaccsu_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int32_t rs1, vuint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmaccsu_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, int32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vwmaccsu_vx_i64m2_mu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmaccsu_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, + vint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vwmaccsu_vv_i64m4_mu(vm, vd, vs1, vs2, vl); } -vint64m4_t test_vwmaccsu_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, int32_t rs1, vuint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmaccsu_vx_i64m4_mu(vbool16_t vm, 
vint64m4_t vd, int32_t rs1,
+                                     vuint32m2_t vs2, size_t vl) {
   return __riscv_vwmaccsu_vx_i64m4_mu(vm, vd, rs1, vs2, vl);
 }

-vint64m8_t test_vwmaccsu_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1, vuint32m4_t vs2, size_t vl) {
+vint64m8_t test_vwmaccsu_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs1,
+                                     vuint32m4_t vs2, size_t vl) {
   return __riscv_vwmaccsu_vv_i64m8_mu(vm, vd, vs1, vs2, vl);
 }

-vint64m8_t test_vwmaccsu_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int32_t rs1, vuint32m4_t vs2, size_t vl) {
+vint64m8_t test_vwmaccsu_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, int32_t rs1,
+                                     vuint32m4_t vs2, size_t vl) {
   return __riscv_vwmaccsu_vx_i64m8_mu(vm, vd, rs1, vs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwmaccu.c b/auto-generated/policy_funcs/llvm-api-tests/vwmaccu.c
index 33b6f8c04..dffe2a64d 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vwmaccu.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vwmaccu.c
@@ -1,487 +1,675 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vuint16mf4_t test_vwmaccu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) {
+vuint16mf4_t test_vwmaccu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs1,
+                                       vuint8mf8_t vs2, size_t vl) {
   return __riscv_vwmaccu_vv_u16mf4_tu(vd, vs1, vs2, vl);
 }

-vuint16mf4_t test_vwmaccu_vx_u16mf4_tu(vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) {
+vuint16mf4_t test_vwmaccu_vx_u16mf4_tu(vuint16mf4_t vd, uint8_t rs1,
+                                       vuint8mf8_t vs2, size_t vl) {
   return __riscv_vwmaccu_vx_u16mf4_tu(vd, rs1, vs2, vl);
 }

-vuint16mf2_t test_vwmaccu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) {
+vuint16mf2_t test_vwmaccu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs1,
+                                       vuint8mf4_t vs2, size_t vl) {
   return __riscv_vwmaccu_vv_u16mf2_tu(vd, vs1, vs2, vl);
 }

-vuint16mf2_t test_vwmaccu_vx_u16mf2_tu(vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) {
+vuint16mf2_t test_vwmaccu_vx_u16mf2_tu(vuint16mf2_t vd, uint8_t rs1,
+                                       vuint8mf4_t vs2, size_t vl) {
   return __riscv_vwmaccu_vx_u16mf2_tu(vd, rs1, vs2, vl);
 }

-vuint16m1_t test_vwmaccu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) {
+vuint16m1_t test_vwmaccu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs1,
+                                     vuint8mf2_t vs2, size_t vl) {
   return __riscv_vwmaccu_vv_u16m1_tu(vd, vs1, vs2, vl);
 }

-vuint16m1_t test_vwmaccu_vx_u16m1_tu(vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) {
+vuint16m1_t test_vwmaccu_vx_u16m1_tu(vuint16m1_t vd, uint8_t rs1,
+                                     vuint8mf2_t vs2, size_t vl) {
   return __riscv_vwmaccu_vx_u16m1_tu(vd, rs1, vs2, vl);
 }

-vuint16m2_t test_vwmaccu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) {
+vuint16m2_t test_vwmaccu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs1,
+                                     vuint8m1_t vs2, size_t vl) {
   return __riscv_vwmaccu_vv_u16m2_tu(vd, vs1, vs2, vl);
 }

-vuint16m2_t test_vwmaccu_vx_u16m2_tu(vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) {
+vuint16m2_t test_vwmaccu_vx_u16m2_tu(vuint16m2_t vd, uint8_t rs1,
+                                     vuint8m1_t vs2, size_t vl) {
   return __riscv_vwmaccu_vx_u16m2_tu(vd, rs1, vs2, vl);
 }

-vuint16m4_t test_vwmaccu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) {
+vuint16m4_t test_vwmaccu_vv_u16m4_tu(vuint16m4_t vd,
vuint8m2_t vs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vwmaccu_vv_u16m4_tu(vd, vs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vx_u16m4_tu(vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vwmaccu_vx_u16m4_tu(vuint16m4_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m4_tu(vd, rs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vwmaccu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vwmaccu_vv_u16m8_tu(vd, vs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vx_u16m8_tu(vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vwmaccu_vx_u16m8_tu(vuint16m8_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m8_tu(vd, rs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vwmaccu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs1, + vuint16mf4_t vs2, size_t vl) { return __riscv_vwmaccu_vv_u32mf2_tu(vd, vs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vx_u32mf2_tu(vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vwmaccu_vx_u32mf2_tu(vuint32mf2_t vd, uint16_t rs1, + vuint16mf4_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u32mf2_tu(vd, rs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vwmaccu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vwmaccu_vv_u32m1_tu(vd, vs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vx_u32m1_tu(vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vwmaccu_vx_u32m1_tu(vuint32m1_t vd, uint16_t rs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u32m1_tu(vd, rs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vwmaccu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vwmaccu_vv_u32m2_tu(vd, vs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vx_u32m2_tu(vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vwmaccu_vx_u32m2_tu(vuint32m2_t vd, uint16_t rs1, + vuint16m1_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u32m2_tu(vd, rs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vwmaccu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vwmaccu_vv_u32m4_tu(vd, vs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vx_u32m4_tu(vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vwmaccu_vx_u32m4_tu(vuint32m4_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u32m4_tu(vd, rs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vwmaccu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vwmaccu_vv_u32m8_tu(vd, vs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vx_u32m8_tu(vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vwmaccu_vx_u32m8_tu(vuint32m8_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u32m8_tu(vd, rs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t 
vl) { +vuint64m1_t test_vwmaccu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vwmaccu_vv_u64m1_tu(vd, vs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vx_u64m1_tu(vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t test_vwmaccu_vx_u64m1_tu(vuint64m1_t vd, uint32_t rs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u64m1_tu(vd, rs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vwmaccu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vwmaccu_vv_u64m2_tu(vd, vs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vx_u64m2_tu(vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vwmaccu_vx_u64m2_tu(vuint64m2_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u64m2_tu(vd, rs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vwmaccu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vwmaccu_vv_u64m4_tu(vd, vs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vx_u64m4_tu(vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vwmaccu_vx_u64m4_tu(vuint64m4_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u64m4_tu(vd, rs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vwmaccu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vwmaccu_vv_u64m8_tu(vd, vs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vx_u64m8_tu(vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vwmaccu_vx_u64m8_tu(vuint64m8_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u64m8_tu(vd, rs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t test_vwmaccu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16mf4_tum(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t test_vwmaccu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + uint8_t rs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u16mf4_tum(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vwmaccu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16mf2_tum(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vwmaccu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + uint8_t rs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u16mf2_tum(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint16m1_t test_vwmaccu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16m1_tum(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, 
vuint8mf2_t vs2, size_t vl) { +vuint16m1_t test_vwmaccu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m1_tum(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint16m2_t test_vwmaccu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs1, vuint8m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16m2_tum(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint16m2_t test_vwmaccu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m2_tum(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vwmaccu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs1, vuint8m2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16m4_tum(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vwmaccu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m4_tum(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vwmaccu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs1, vuint8m4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16m8_tum(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vwmaccu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m8_tum(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vwmaccu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32mf2_tum(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vwmaccu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u32mf2_tum(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vwmaccu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32m1_tum(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vwmaccu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + uint16_t rs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u32m1_tum(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vwmaccu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32m2_tum(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint32m2_t 
test_vwmaccu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + uint16_t rs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u32m2_tum(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vwmaccu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32m4_tum(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vwmaccu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u32m4_tum(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vwmaccu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs1, vuint16m4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32m8_tum(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vwmaccu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u32m8_tum(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t test_vwmaccu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u64m1_tum(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t test_vwmaccu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + uint32_t rs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u64m1_tum(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vwmaccu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u64m2_tum(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vwmaccu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + uint32_t rs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u64m2_tum(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vwmaccu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u64m4_tum(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vwmaccu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + uint32_t rs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u64m4_tum(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vwmaccu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u64m8_tum(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vwmaccu_vx_u64m8_tum(vbool8_t 
vm, vuint64m8_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u64m8_tum(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t test_vwmaccu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16mf4_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t test_vwmaccu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + uint8_t rs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u16mf4_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vwmaccu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vwmaccu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + uint8_t rs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u16mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint16m1_t test_vwmaccu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16m1_tumu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint16m1_t test_vwmaccu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + uint8_t rs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u16m1_tumu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint16m2_t test_vwmaccu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs1, vuint8m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16m2_tumu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint16m2_t test_vwmaccu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m2_tumu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vwmaccu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs1, vuint8m2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16m4_tumu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vwmaccu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m4_tumu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vwmaccu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs1, vuint8m4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16m8_tumu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vwmaccu_vx_u16m8_tumu(vbool2_t vm, 
vuint16m8_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m8_tumu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vwmaccu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32mf2_tumu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vwmaccu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u32mf2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vwmaccu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32m1_tumu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vwmaccu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + uint16_t rs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u32m1_tumu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vwmaccu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32m2_tumu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vwmaccu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + uint16_t rs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u32m2_tumu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vwmaccu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32m4_tumu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vwmaccu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + uint16_t rs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u32m4_tumu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vwmaccu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs1, vuint16m4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32m8_tumu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vwmaccu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + uint16_t rs1, vuint16m4_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u32m8_tumu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t test_vwmaccu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u64m1_tumu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t 
test_vwmaccu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + uint32_t rs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u64m1_tumu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vwmaccu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u64m2_tumu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vwmaccu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + uint32_t rs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u64m2_tumu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vwmaccu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u64m4_tumu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vwmaccu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + uint32_t rs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u64m4_tumu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vwmaccu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u64m8_tumu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vwmaccu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + uint32_t rs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u64m8_tumu(vm, vd, rs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs1, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t test_vwmaccu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16mf4_mu(vm, vd, vs1, vs2, vl); } -vuint16mf4_t test_vwmaccu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, uint8_t rs1, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t test_vwmaccu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + uint8_t rs1, vuint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u16mf4_mu(vm, vd, rs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs1, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vwmaccu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16mf2_mu(vm, vd, vs1, vs2, vl); } -vuint16mf2_t test_vwmaccu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, uint8_t rs1, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vwmaccu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + uint8_t rs1, vuint8mf4_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u16mf2_mu(vm, vd, rs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs1, vuint8mf2_t vs2, size_t vl) { +vuint16m1_t test_vwmaccu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs1, vuint8mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16m1_mu(vm, vd, vs1, vs2, vl); } -vuint16m1_t test_vwmaccu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, vuint8mf2_t vs2, size_t vl) { +vuint16m1_t 
test_vwmaccu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, uint8_t rs1, + vuint8mf2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m1_mu(vm, vd, rs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs1, vuint8m1_t vs2, size_t vl) { +vuint16m2_t test_vwmaccu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs1, vuint8m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16m2_mu(vm, vd, vs1, vs2, vl); } -vuint16m2_t test_vwmaccu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, vuint8m1_t vs2, size_t vl) { +vuint16m2_t test_vwmaccu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, uint8_t rs1, + vuint8m1_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m2_mu(vm, vd, rs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs1, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vwmaccu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs1, vuint8m2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16m4_mu(vm, vd, vs1, vs2, vl); } -vuint16m4_t test_vwmaccu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vwmaccu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, uint8_t rs1, + vuint8m2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m4_mu(vm, vd, rs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs1, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vwmaccu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs1, vuint8m4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u16m8_mu(vm, vd, vs1, vs2, vl); } -vuint16m8_t test_vwmaccu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vwmaccu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, uint8_t rs1, + vuint8m4_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u16m8_mu(vm, vd, rs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs1, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vwmaccu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32mf2_mu(vm, vd, vs1, vs2, vl); } -vuint32mf2_t test_vwmaccu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, uint16_t rs1, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vwmaccu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + uint16_t rs1, vuint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccu_vx_u32mf2_mu(vm, vd, rs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs1, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vwmaccu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs1, vuint16mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32m1_mu(vm, vd, vs1, vs2, vl); } -vuint32m1_t test_vwmaccu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint16_t rs1, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vwmaccu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, uint16_t rs1, + vuint16mf2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u32m1_mu(vm, vd, rs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs1, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vwmaccu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs1, vuint16m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32m2_mu(vm, vd, vs1, vs2, vl); } -vuint32m2_t test_vwmaccu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint16_t rs1, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vwmaccu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, uint16_t rs1, + vuint16m1_t 
vs2, size_t vl) { return __riscv_vwmaccu_vx_u32m2_mu(vm, vd, rs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs1, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vwmaccu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs1, vuint16m2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32m4_mu(vm, vd, vs1, vs2, vl); } -vuint32m4_t test_vwmaccu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vwmaccu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, uint16_t rs1, + vuint16m2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u32m4_mu(vm, vd, rs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs1, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vwmaccu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs1, vuint16m4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u32m8_mu(vm, vd, vs1, vs2, vl); } -vuint32m8_t test_vwmaccu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vwmaccu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, uint16_t rs1, + vuint16m4_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u32m8_mu(vm, vd, rs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs1, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t test_vwmaccu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs1, vuint32mf2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u64m1_mu(vm, vd, vs1, vs2, vl); } -vuint64m1_t test_vwmaccu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint32_t rs1, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t test_vwmaccu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, uint32_t rs1, + vuint32mf2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u64m1_mu(vm, vd, rs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs1, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vwmaccu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs1, vuint32m1_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u64m2_mu(vm, vd, vs1, vs2, vl); } -vuint64m2_t test_vwmaccu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint32_t rs1, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vwmaccu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, uint32_t rs1, + vuint32m1_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u64m2_mu(vm, vd, rs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs1, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vwmaccu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs1, vuint32m2_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u64m4_mu(vm, vd, vs1, vs2, vl); } -vuint64m4_t test_vwmaccu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint32_t rs1, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vwmaccu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, uint32_t rs1, + vuint32m2_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u64m4_mu(vm, vd, rs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs1, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vwmaccu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs1, vuint32m4_t vs2, + size_t vl) { return __riscv_vwmaccu_vv_u64m8_mu(vm, vd, vs1, vs2, vl); } -vuint64m8_t test_vwmaccu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint32_t rs1, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vwmaccu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, uint32_t rs1, + vuint32m4_t vs2, size_t vl) { return __riscv_vwmaccu_vx_u64m8_mu(vm, vd, rs1, vs2, vl); } 
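
The policy suffixes exercised throughout these generated tests encode how the destination's inactive and tail elements are handled: _tu keeps tail elements of vd, _tum keeps tail elements under a mask, _tumu keeps both tail and masked-off elements, and _mu keeps masked-off elements only. As an illustrative sketch (not part of the generated tests; the helper name widen_mac_even and the even-lane mask are hypothetical), this is how one of the vwmaccu intrinsics above might be used:

#include <riscv_vector.h>

// Illustrative sketch: acc += a * b on even-indexed elements only.
// With the _tumu policy, masked-off and tail elements of acc keep
// their previous values instead of being overwritten.
static vuint32m1_t widen_mac_even(vuint32m1_t acc, vuint16mf2_t a,
                                  vuint16mf2_t b, size_t vl) {
  // Build a mask selecting even element indices: (vid(i) & 1) == 0.
  vuint16mf2_t idx = __riscv_vid_v_u16mf2(vl);
  vbool32_t even =
      __riscv_vmseq_vx_u16mf2_b32(__riscv_vand_vx_u16mf2(idx, 1, vl), 0, vl);
  // Widening unsigned multiply-accumulate: 16-bit sources, 32-bit
  // accumulator, same intrinsic family as the tests above.
  return __riscv_vwmaccu_vv_u32m1_tumu(even, acc, a, b, vl);
}

Note the argument order matches the signatures above: mask first, then the destination accumulator vd, then the narrow sources.
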
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwmaccus.c b/auto-generated/policy_funcs/llvm-api-tests/vwmaccus.c
index 508919bab..21ee75c6b 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vwmaccus.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vwmaccus.c
@@ -1,247 +1,319 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -target-feature +zvfh -disable-O0-optnone \
 // RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN:   FileCheck --check-prefix=CHECK-RV64 %s

 #include <riscv_vector.h>

-vint16mf4_t test_vwmaccus_vx_i16mf4_tu(vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) {
+vint16mf4_t test_vwmaccus_vx_i16mf4_tu(vint16mf4_t vd, uint8_t rs1,
+                                       vint8mf8_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i16mf4_tu(vd, rs1, vs2, vl);
 }

-vint16mf2_t test_vwmaccus_vx_i16mf2_tu(vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) {
+vint16mf2_t test_vwmaccus_vx_i16mf2_tu(vint16mf2_t vd, uint8_t rs1,
+                                       vint8mf4_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i16mf2_tu(vd, rs1, vs2, vl);
 }

-vint16m1_t test_vwmaccus_vx_i16m1_tu(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) {
+vint16m1_t test_vwmaccus_vx_i16m1_tu(vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2,
+                                     size_t vl) {
   return __riscv_vwmaccus_vx_i16m1_tu(vd, rs1, vs2, vl);
 }

-vint16m2_t test_vwmaccus_vx_i16m2_tu(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) {
+vint16m2_t test_vwmaccus_vx_i16m2_tu(vint16m2_t vd, uint8_t rs1, vint8m1_t vs2,
+                                     size_t vl) {
   return __riscv_vwmaccus_vx_i16m2_tu(vd, rs1, vs2, vl);
 }

-vint16m4_t test_vwmaccus_vx_i16m4_tu(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) {
+vint16m4_t test_vwmaccus_vx_i16m4_tu(vint16m4_t vd, uint8_t rs1, vint8m2_t vs2,
+                                     size_t vl) {
   return __riscv_vwmaccus_vx_i16m4_tu(vd, rs1, vs2, vl);
 }

-vint16m8_t test_vwmaccus_vx_i16m8_tu(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) {
+vint16m8_t test_vwmaccus_vx_i16m8_tu(vint16m8_t vd, uint8_t rs1, vint8m4_t vs2,
+                                     size_t vl) {
   return __riscv_vwmaccus_vx_i16m8_tu(vd, rs1, vs2, vl);
 }

-vint32mf2_t test_vwmaccus_vx_i32mf2_tu(vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) {
+vint32mf2_t test_vwmaccus_vx_i32mf2_tu(vint32mf2_t vd, uint16_t rs1,
+                                       vint16mf4_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i32mf2_tu(vd, rs1, vs2, vl);
 }

-vint32m1_t test_vwmaccus_vx_i32m1_tu(vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) {
+vint32m1_t test_vwmaccus_vx_i32m1_tu(vint32m1_t vd, uint16_t rs1,
+                                     vint16mf2_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i32m1_tu(vd, rs1, vs2, vl);
 }

-vint32m2_t test_vwmaccus_vx_i32m2_tu(vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) {
+vint32m2_t test_vwmaccus_vx_i32m2_tu(vint32m2_t vd, uint16_t rs1,
+                                     vint16m1_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i32m2_tu(vd, rs1, vs2, vl);
 }

-vint32m4_t test_vwmaccus_vx_i32m4_tu(vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) {
+vint32m4_t test_vwmaccus_vx_i32m4_tu(vint32m4_t vd, uint16_t rs1,
+                                     vint16m2_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i32m4_tu(vd, rs1, vs2, vl);
 }

-vint32m8_t test_vwmaccus_vx_i32m8_tu(vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) {
+vint32m8_t test_vwmaccus_vx_i32m8_tu(vint32m8_t vd, uint16_t rs1,
+                                     vint16m4_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i32m8_tu(vd, rs1, vs2, vl);
 }

-vint64m1_t test_vwmaccus_vx_i64m1_tu(vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) {
+vint64m1_t
test_vwmaccus_vx_i64m1_tu(vint64m1_t vd, uint32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i64m1_tu(vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccus_vx_i64m2_tu(vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmaccus_vx_i64m2_tu(vint64m2_t vd, uint32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i64m2_tu(vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccus_vx_i64m4_tu(vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmaccus_vx_i64m4_tu(vint64m4_t vd, uint32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i64m4_tu(vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccus_vx_i64m8_tu(vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmaccus_vx_i64m8_tu(vint64m8_t vd, uint32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i64m8_tu(vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccus_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmaccus_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + uint8_t rs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccus_vx_i16mf4_tum(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccus_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmaccus_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + uint8_t rs1, vint8mf4_t vs2, + size_t vl) { return __riscv_vwmaccus_vx_i16mf2_tum(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccus_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmaccus_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, uint8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i16m1_tum(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccus_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmaccus_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, uint8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i16m2_tum(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccus_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmaccus_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, uint8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i16m4_tum(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccus_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmaccus_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, uint8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i16m8_tum(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccus_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmaccus_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + uint16_t rs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccus_vx_i32mf2_tum(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccus_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmaccus_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, uint16_t rs1, + vint16mf2_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i32m1_tum(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccus_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmaccus_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, uint16_t rs1, + vint16m1_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i32m2_tum(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccus_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, uint16_t 
rs1, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmaccus_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, uint16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i32m4_tum(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccus_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmaccus_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, uint16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i32m8_tum(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccus_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmaccus_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, uint32_t rs1, + vint32mf2_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i64m1_tum(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccus_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmaccus_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, uint32_t rs1, + vint32m1_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i64m2_tum(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccus_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmaccus_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, uint32_t rs1, + vint32m2_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i64m4_tum(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccus_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmaccus_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, uint32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i64m8_tum(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccus_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmaccus_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + uint8_t rs1, vint8mf8_t vs2, + size_t vl) { return __riscv_vwmaccus_vx_i16mf4_tumu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccus_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmaccus_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + uint8_t rs1, vint8mf4_t vs2, + size_t vl) { return __riscv_vwmaccus_vx_i16mf2_tumu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccus_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmaccus_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, uint8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i16m1_tumu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccus_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmaccus_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, uint8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i16m2_tumu(vm, vd, rs1, vs2, vl); } -vint16m4_t test_vwmaccus_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) { +vint16m4_t test_vwmaccus_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, uint8_t rs1, + vint8m2_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i16m4_tumu(vm, vd, rs1, vs2, vl); } -vint16m8_t test_vwmaccus_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) { +vint16m8_t test_vwmaccus_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, uint8_t rs1, + vint8m4_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i16m8_tumu(vm, vd, rs1, vs2, vl); } -vint32mf2_t test_vwmaccus_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) { +vint32mf2_t test_vwmaccus_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + 
uint16_t rs1, vint16mf4_t vs2, + size_t vl) { return __riscv_vwmaccus_vx_i32mf2_tumu(vm, vd, rs1, vs2, vl); } -vint32m1_t test_vwmaccus_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) { +vint32m1_t test_vwmaccus_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + uint16_t rs1, vint16mf2_t vs2, + size_t vl) { return __riscv_vwmaccus_vx_i32m1_tumu(vm, vd, rs1, vs2, vl); } -vint32m2_t test_vwmaccus_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) { +vint32m2_t test_vwmaccus_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + uint16_t rs1, vint16m1_t vs2, + size_t vl) { return __riscv_vwmaccus_vx_i32m2_tumu(vm, vd, rs1, vs2, vl); } -vint32m4_t test_vwmaccus_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) { +vint32m4_t test_vwmaccus_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, uint16_t rs1, + vint16m2_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i32m4_tumu(vm, vd, rs1, vs2, vl); } -vint32m8_t test_vwmaccus_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) { +vint32m8_t test_vwmaccus_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, uint16_t rs1, + vint16m4_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i32m8_tumu(vm, vd, rs1, vs2, vl); } -vint64m1_t test_vwmaccus_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) { +vint64m1_t test_vwmaccus_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + uint32_t rs1, vint32mf2_t vs2, + size_t vl) { return __riscv_vwmaccus_vx_i64m1_tumu(vm, vd, rs1, vs2, vl); } -vint64m2_t test_vwmaccus_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) { +vint64m2_t test_vwmaccus_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + uint32_t rs1, vint32m1_t vs2, + size_t vl) { return __riscv_vwmaccus_vx_i64m2_tumu(vm, vd, rs1, vs2, vl); } -vint64m4_t test_vwmaccus_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) { +vint64m4_t test_vwmaccus_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + uint32_t rs1, vint32m2_t vs2, + size_t vl) { return __riscv_vwmaccus_vx_i64m4_tumu(vm, vd, rs1, vs2, vl); } -vint64m8_t test_vwmaccus_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) { +vint64m8_t test_vwmaccus_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, uint32_t rs1, + vint32m4_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i64m8_tumu(vm, vd, rs1, vs2, vl); } -vint16mf4_t test_vwmaccus_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, uint8_t rs1, vint8mf8_t vs2, size_t vl) { +vint16mf4_t test_vwmaccus_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + uint8_t rs1, vint8mf8_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i16mf4_mu(vm, vd, rs1, vs2, vl); } -vint16mf2_t test_vwmaccus_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, uint8_t rs1, vint8mf4_t vs2, size_t vl) { +vint16mf2_t test_vwmaccus_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + uint8_t rs1, vint8mf4_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i16mf2_mu(vm, vd, rs1, vs2, vl); } -vint16m1_t test_vwmaccus_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, uint8_t rs1, vint8mf2_t vs2, size_t vl) { +vint16m1_t test_vwmaccus_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, uint8_t rs1, + vint8mf2_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i16m1_mu(vm, vd, rs1, vs2, vl); } -vint16m2_t test_vwmaccus_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, uint8_t rs1, vint8m1_t vs2, size_t vl) { +vint16m2_t test_vwmaccus_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, uint8_t rs1, + vint8m1_t vs2, size_t vl) { return __riscv_vwmaccus_vx_i16m2_mu(vm, vd, rs1, 
vs2, vl);
 }

-vint16m4_t test_vwmaccus_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, uint8_t rs1, vint8m2_t vs2, size_t vl) {
+vint16m4_t test_vwmaccus_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, uint8_t rs1,
+                                     vint8m2_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i16m4_mu(vm, vd, rs1, vs2, vl);
 }

-vint16m8_t test_vwmaccus_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, uint8_t rs1, vint8m4_t vs2, size_t vl) {
+vint16m8_t test_vwmaccus_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, uint8_t rs1,
+                                     vint8m4_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i16m8_mu(vm, vd, rs1, vs2, vl);
 }

-vint32mf2_t test_vwmaccus_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, uint16_t rs1, vint16mf4_t vs2, size_t vl) {
+vint32mf2_t test_vwmaccus_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                       uint16_t rs1, vint16mf4_t vs2,
+                                       size_t vl) {
   return __riscv_vwmaccus_vx_i32mf2_mu(vm, vd, rs1, vs2, vl);
 }

-vint32m1_t test_vwmaccus_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, uint16_t rs1, vint16mf2_t vs2, size_t vl) {
+vint32m1_t test_vwmaccus_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, uint16_t rs1,
+                                     vint16mf2_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i32m1_mu(vm, vd, rs1, vs2, vl);
 }

-vint32m2_t test_vwmaccus_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, uint16_t rs1, vint16m1_t vs2, size_t vl) {
+vint32m2_t test_vwmaccus_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, uint16_t rs1,
+                                     vint16m1_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i32m2_mu(vm, vd, rs1, vs2, vl);
 }

-vint32m4_t test_vwmaccus_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, uint16_t rs1, vint16m2_t vs2, size_t vl) {
+vint32m4_t test_vwmaccus_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, uint16_t rs1,
+                                     vint16m2_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i32m4_mu(vm, vd, rs1, vs2, vl);
 }

-vint32m8_t test_vwmaccus_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, uint16_t rs1, vint16m4_t vs2, size_t vl) {
+vint32m8_t test_vwmaccus_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, uint16_t rs1,
+                                     vint16m4_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i32m8_mu(vm, vd, rs1, vs2, vl);
 }

-vint64m1_t test_vwmaccus_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, uint32_t rs1, vint32mf2_t vs2, size_t vl) {
+vint64m1_t test_vwmaccus_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, uint32_t rs1,
+                                     vint32mf2_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i64m1_mu(vm, vd, rs1, vs2, vl);
 }

-vint64m2_t test_vwmaccus_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, uint32_t rs1, vint32m1_t vs2, size_t vl) {
+vint64m2_t test_vwmaccus_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, uint32_t rs1,
+                                     vint32m1_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i64m2_mu(vm, vd, rs1, vs2, vl);
 }

-vint64m4_t test_vwmaccus_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, uint32_t rs1, vint32m2_t vs2, size_t vl) {
+vint64m4_t test_vwmaccus_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, uint32_t rs1,
+                                     vint32m2_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i64m4_mu(vm, vd, rs1, vs2, vl);
 }

-vint64m8_t test_vwmaccus_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, uint32_t rs1, vint32m4_t vs2, size_t vl) {
+vint64m8_t test_vwmaccus_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, uint32_t rs1,
+                                     vint32m4_t vs2, size_t vl) {
   return __riscv_vwmaccus_vx_i64m8_mu(vm, vd, rs1, vs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwmul.c b/auto-generated/policy_funcs/llvm-api-tests/vwmul.c
index 72876a034..b8d015ad5 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vwmul.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vwmul.c
@@ -5,482 +5,611 @@

 #include <riscv_vector.h>

-vint16mf4_t test_vwmul_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+vint16mf4_t
test_vwmul_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vwmul_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vwmul_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwmul_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwmul_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vwmul_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwmul_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vwmul_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vwmul_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwmul_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwmul_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vwmul_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwmul_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vwmul_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwmul_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwmul_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwmul_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vwmul_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwmul_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwmul_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwmul_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vwmul_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwmul_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwmul_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwmul_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vwmul_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vwmul_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwmul_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwmul_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwmul_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vwmul_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwmul_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwmul_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwmul_vv_i32m1_tu(vd, vs2, vs1, 
vl); } -vint32m1_t test_vwmul_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwmul_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwmul_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwmul_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vwmul_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwmul_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwmul_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwmul_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vwmul_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vwmul_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwmul_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwmul_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vwmul_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwmul_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vwmul_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vwmul_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwmul_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwmul_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vwmul_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwmul_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwmul_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwmul_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwmul_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwmul_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vwmul_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwmul_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vwmul_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vwmul_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwmul_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwmul_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vwmul_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwmul_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vwmul_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vwmul_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwmul_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwmul_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vwmul_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwmul_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vwmul_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vwmul_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwmul_vx_i64m8_tu(vint64m8_t 
vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwmul_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vwmul_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwmul_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vwmul_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmul_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwmul_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmul_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwmul_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vwmul_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmul_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwmul_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmul_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwmul_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwmul_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwmul_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwmul_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwmul_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwmul_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwmul_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwmul_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwmul_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwmul_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwmul_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmul_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwmul_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { 
+vint32mf2_t test_vwmul_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwmul_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwmul_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwmul_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwmul_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmul_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwmul_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwmul_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwmul_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwmul_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwmul_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwmul_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmul_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwmul_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwmul_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwmul_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwmul_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwmul_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwmul_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmul_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwmul_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwmul_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmul_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwmul_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwmul_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmul_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwmul_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwmul_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t 
test_vwmul_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwmul_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwmul_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmul_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwmul_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwmul_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwmul_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwmul_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwmul_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmul_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwmul_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwmul_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmul_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwmul_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwmul_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwmul_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwmul_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vwmul_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmul_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwmul_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmul_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwmul_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vwmul_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmul_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwmul_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmul_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwmul_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwmul_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwmul_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwmul_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwmul_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwmul_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + int8_t 
rs1, size_t vl) { return __riscv_vwmul_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwmul_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwmul_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwmul_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwmul_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwmul_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmul_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwmul_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwmul_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwmul_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwmul_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwmul_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vwmul_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmul_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwmul_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwmul_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwmul_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwmul_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwmul_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwmul_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmul_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwmul_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwmul_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, 
size_t vl) { +vint32m8_t test_vwmul_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwmul_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwmul_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwmul_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmul_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwmul_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vwmul_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmul_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwmul_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vwmul_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmul_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwmul_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwmul_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwmul_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwmul_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwmul_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmul_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwmul_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwmul_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwmul_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwmul_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwmul_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmul_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwmul_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwmul_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmul_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwmul_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwmul_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwmul_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwmul_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { return __riscv_vwmul_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmul_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwmul_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmul_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwmul_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { return __riscv_vwmul_vv_i16mf2_mu(vm, vd, vs2, vs1, 
vl); } -vint16mf2_t test_vwmul_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwmul_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmul_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwmul_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwmul_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmul_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwmul_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmul_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwmul_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwmul_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmul_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwmul_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmul_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwmul_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwmul_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmul_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwmul_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmul_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwmul_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwmul_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmul_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwmul_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwmul_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmul_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwmul_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwmul_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmul_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwmul_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmul_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwmul_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwmul_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmul_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwmul_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32m1_mu(vm, 
vd, vs2, rs1, vl); } -vint32m2_t test_vwmul_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwmul_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwmul_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmul_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwmul_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmul_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwmul_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwmul_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmul_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwmul_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwmul_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwmul_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwmul_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwmul_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwmul_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwmul_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmul_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwmul_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwmul_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmul_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwmul_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwmul_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmul_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwmul_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwmul_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwmul_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwmul_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwmul_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmul_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwmul_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwmul_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwmul_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwmul_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwmul_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmul_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwmul_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return 
__riscv_vwmul_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmul_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwmul_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwmul_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwmulsu.c b/auto-generated/policy_funcs/llvm-api-tests/vwmulsu.c index 4d0a884b2..d3f726780 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vwmulsu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vwmulsu.c @@ -5,482 +5,635 @@ #include <riscv_vector.h> -vint16mf4_t test_vwmulsu_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwmulsu_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vwmulsu_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { +vint16mf4_t test_vwmulsu_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vwmulsu_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwmulsu_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vwmulsu_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { +vint16mf2_t test_vwmulsu_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vwmulsu_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwmulsu_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwmulsu_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { +vint16m1_t test_vwmulsu_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vwmulsu_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint16m2_t test_vwmulsu_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vwmulsu_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { +vint16m2_t test_vwmulsu_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vwmulsu_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint16m4_t test_vwmulsu_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vwmulsu_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { +vint16m4_t test_vwmulsu_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vwmulsu_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint16m8_t test_vwmulsu_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vwmulsu_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2,
uint8_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vwmulsu_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwmulsu_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vwmulsu_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { +vint32mf2_t test_vwmulsu_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vwmulsu_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwmulsu_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwmulsu_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { +vint32m1_t test_vwmulsu_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vwmulsu_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint32m2_t test_vwmulsu_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vwmulsu_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { +vint32m2_t test_vwmulsu_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vwmulsu_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint32m4_t test_vwmulsu_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vwmulsu_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { +vint32m4_t test_vwmulsu_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vwmulsu_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint32m8_t test_vwmulsu_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vwmulsu_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { +vint32m8_t test_vwmulsu_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vwmulsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwmulsu_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { +vint64m1_t test_vwmulsu_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vwmulsu_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint64m2_t test_vwmulsu_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vwmulsu_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) { +vint64m2_t test_vwmulsu_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, uint32_t 
rs1, + size_t vl) { return __riscv_vwmulsu_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vwmulsu_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint64m4_t test_vwmulsu_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vwmulsu_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { +vint64m4_t test_vwmulsu_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vwmulsu_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint64m8_t test_vwmulsu_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vwmulsu_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { +vint64m8_t test_vwmulsu_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vwmulsu_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwmulsu_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmulsu_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { +vint16mf4_t test_vwmulsu_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmulsu_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwmulsu_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmulsu_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { +vint16mf2_t test_vwmulsu_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmulsu_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwmulsu_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmulsu_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { +vint16m1_t test_vwmulsu_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmulsu_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint16m2_t test_vwmulsu_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmulsu_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { +vint16m2_t test_vwmulsu_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmulsu_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint16m4_t test_vwmulsu_vv_i16m4_tum(vbool4_t vm, 
vint16m4_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmulsu_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { +vint16m4_t test_vwmulsu_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmulsu_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint16m8_t test_vwmulsu_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmulsu_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { +vint16m8_t test_vwmulsu_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmulsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwmulsu_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmulsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { +vint32mf2_t test_vwmulsu_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmulsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwmulsu_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmulsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { +vint32m1_t test_vwmulsu_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmulsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint32m2_t test_vwmulsu_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmulsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { +vint32m2_t test_vwmulsu_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, + vint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmulsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint32m4_t test_vwmulsu_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmulsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { +vint32m4_t test_vwmulsu_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwmulsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint32m8_t test_vwmulsu_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i32m8_tum(vm, vd, vs2, 
vs1, vl); } -vint32m8_t test_vwmulsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { +vint32m8_t test_vwmulsu_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmulsu_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwmulsu_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmulsu_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { +vint64m1_t test_vwmulsu_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmulsu_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint64m2_t test_vwmulsu_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwmulsu_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) { +vint64m2_t test_vwmulsu_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, + vint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmulsu_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint64m4_t test_vwmulsu_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwmulsu_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { +vint64m4_t test_vwmulsu_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, + vint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmulsu_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint64m8_t test_vwmulsu_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmulsu_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { +vint64m8_t test_vwmulsu_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwmulsu_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwmulsu_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmulsu_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { +vint16mf4_t test_vwmulsu_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmulsu_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwmulsu_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmulsu_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, 
vint8mf4_t vs2, uint8_t rs1, size_t vl) { +vint16mf2_t test_vwmulsu_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmulsu_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwmulsu_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmulsu_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { +vint16m1_t test_vwmulsu_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmulsu_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint16m2_t test_vwmulsu_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmulsu_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { +vint16m2_t test_vwmulsu_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmulsu_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint16m4_t test_vwmulsu_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmulsu_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { +vint16m4_t test_vwmulsu_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmulsu_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint16m8_t test_vwmulsu_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmulsu_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { +vint16m8_t test_vwmulsu_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmulsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwmulsu_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmulsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { +vint32mf2_t test_vwmulsu_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmulsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwmulsu_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmulsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { +vint32m1_t 
test_vwmulsu_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmulsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint32m2_t test_vwmulsu_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmulsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { +vint32m2_t test_vwmulsu_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, + vint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmulsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint32m4_t test_vwmulsu_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmulsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { +vint32m4_t test_vwmulsu_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, + vint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwmulsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint32m8_t test_vwmulsu_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwmulsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { +vint32m8_t test_vwmulsu_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, + vint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmulsu_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwmulsu_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { +vint64m1_t test_vwmulsu_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmulsu_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint64m2_t test_vwmulsu_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwmulsu_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) { +vint64m2_t test_vwmulsu_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, + vint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmulsu_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint64m4_t test_vwmulsu_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwmulsu_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { +vint64m4_t test_vwmulsu_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, + vint32m2_t 
vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmulsu_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint64m8_t test_vwmulsu_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmulsu_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { +vint64m8_t test_vwmulsu_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, + vint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwmulsu_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwmulsu_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwmulsu_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, uint8_t rs1, size_t vl) { +vint16mf4_t test_vwmulsu_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwmulsu_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwmulsu_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwmulsu_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, uint8_t rs1, size_t vl) { +vint16mf2_t test_vwmulsu_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwmulsu_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwmulsu_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwmulsu_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, uint8_t rs1, size_t vl) { +vint16m1_t test_vwmulsu_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwmulsu_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vint16m2_t test_vwmulsu_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwmulsu_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, uint8_t rs1, size_t vl) { +vint16m2_t test_vwmulsu_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwmulsu_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vint16m4_t test_vwmulsu_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwmulsu_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, uint8_t rs1, size_t vl) { +vint16m4_t test_vwmulsu_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwmulsu_vv_i16m8_mu(vbool2_t vm, 
vint16m8_t vd, vint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vint16m8_t test_vwmulsu_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwmulsu_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, uint8_t rs1, size_t vl) { +vint16m8_t test_vwmulsu_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwmulsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwmulsu_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwmulsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, uint16_t rs1, size_t vl) { +vint32mf2_t test_vwmulsu_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulsu_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwmulsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwmulsu_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwmulsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, uint16_t rs1, size_t vl) { +vint32m1_t test_vwmulsu_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwmulsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vint32m2_t test_vwmulsu_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwmulsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, uint16_t rs1, size_t vl) { +vint32m2_t test_vwmulsu_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwmulsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vint32m4_t test_vwmulsu_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwmulsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, uint16_t rs1, size_t vl) { +vint32m4_t test_vwmulsu_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwmulsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vint32m8_t test_vwmulsu_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwmulsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, uint16_t rs1, size_t vl) { +vint32m8_t test_vwmulsu_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwmulsu_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwmulsu_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t 
vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwmulsu_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwmulsu_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, uint32_t rs1, size_t vl) { +vint64m1_t test_vwmulsu_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwmulsu_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vint64m2_t test_vwmulsu_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwmulsu_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, uint32_t rs1, size_t vl) { +vint64m2_t test_vwmulsu_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwmulsu_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vint64m4_t test_vwmulsu_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwmulsu_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, uint32_t rs1, size_t vl) { +vint64m4_t test_vwmulsu_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwmulsu_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vint64m8_t test_vwmulsu_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vwmulsu_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwmulsu_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, uint32_t rs1, size_t vl) { +vint64m8_t test_vwmulsu_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, + vint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulsu_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwmulu.c b/auto-generated/policy_funcs/llvm-api-tests/vwmulu.c index 4ac20c220..373536210 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vwmulu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vwmulu.c @@ -5,482 +5,661 @@ #include <riscv_vector.h> -vuint16mf4_t test_vwmulu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwmulu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vwmulu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vwmulu_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwmulu_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vwmulu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwmulu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vwmulu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vwmulu_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwmulu_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwmulu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t
test_vwmulu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vwmulu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwmulu_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwmulu_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwmulu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwmulu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vwmulu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwmulu_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwmulu_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwmulu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwmulu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vwmulu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwmulu_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwmulu_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwmulu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwmulu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vwmulu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwmulu_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwmulu_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwmulu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vwmulu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwmulu_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwmulu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwmulu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vwmulu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwmulu_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwmulu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwmulu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwmulu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwmulu_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwmulu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { 
+vuint32m4_t test_vwmulu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vwmulu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwmulu_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwmulu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwmulu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vwmulu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwmulu_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwmulu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwmulu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwmulu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vwmulu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwmulu_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwmulu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwmulu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwmulu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwmulu_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwmulu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwmulu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vwmulu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwmulu_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwmulu_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwmulu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwmulu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vwmulu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwmulu_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwmulu_vx_u64m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwmulu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwmulu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwmulu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwmulu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } 
-vuint16mf2_t test_vwmulu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwmulu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwmulu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwmulu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwmulu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwmulu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwmulu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwmulu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwmulu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwmulu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwmulu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwmulu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwmulu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwmulu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwmulu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwmulu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwmulu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwmulu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwmulu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwmulu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwmulu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwmulu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, 
vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwmulu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwmulu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwmulu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwmulu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwmulu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwmulu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwmulu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwmulu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwmulu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwmulu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwmulu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwmulu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwmulu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t 
test_vwmulu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwmulu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwmulu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwmulu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwmulu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwmulu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwmulu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwmulu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwmulu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwmulu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwmulu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwmulu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwmulu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwmulu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwmulu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwmulu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwmulu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwmulu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwmulu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwmulu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwmulu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwmulu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwmulu_vv_u16m4_tumu(vbool4_t vm, 
vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwmulu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwmulu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwmulu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwmulu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwmulu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwmulu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwmulu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwmulu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwmulu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwmulu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwmulu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwmulu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwmulu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwmulu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwmulu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t 
vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwmulu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwmulu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwmulu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwmulu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwmulu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwmulu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwmulu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwmulu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwmulu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwmulu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwmulu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwmulu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwmulu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwmulu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwmulu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwmulu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwmulu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + 
size_t vl) { return __riscv_vwmulu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwmulu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwmulu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwmulu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwmulu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwmulu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwmulu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwmulu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwmulu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vwmulu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwmulu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwmulu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwmulu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwmulu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vwmulu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwmulu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwmulu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwmulu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwmulu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vwmulu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwmulu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwmulu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwmulu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwmulu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwmulu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwmulu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwmulu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwmulu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwmulu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwmulu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwmulu_vx_u32m1_mu(vbool32_t vm, 
vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwmulu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwmulu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwmulu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwmulu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwmulu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwmulu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwmulu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwmulu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwmulu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwmulu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwmulu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwmulu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwmulu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwmulu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwmulu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwmulu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwmulu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwmulu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwmulu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwmulu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwmulu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwmulu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwmulu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwmulu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwmulu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwmulu_vx_u64m4_mu(vbool16_t vm, 
vuint64m4_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwmulu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwmulu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwmulu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwmulu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwmulu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwmulu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwredsum.c b/auto-generated/policy_funcs/llvm-api-tests/vwredsum.c index 5d9cfb5d8..6f40234d2 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vwredsum.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vwredsum.c @@ -5,146 +5,200 @@ #include <riscv_vector.h> -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tu(vint16m1_t vd, vint8mf8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tu(vint16m1_t vd, vint8mf8_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i8mf8_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tu(vint16m1_t vd, vint8mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tu(vint16m1_t vd, vint8mf4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i8mf4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i8mf2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m1_i16m1_tu(vint16m1_t vd, vint8m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8m1_i16m1_tu(vint16m1_t vd, vint8m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i8m1_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m2_i16m1_tu(vint16m1_t vd, vint8m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8m2_i16m1_tu(vint16m1_t vd, vint8m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i8m2_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m4_i16m1_tu(vint16m1_t vd, vint8m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8m4_i16m1_tu(vint16m1_t vd, vint8m4_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i8m4_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m8_i16m1_tu(vint16m1_t vd, vint8m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8m8_i16m1_tu(vint16m1_t vd, vint8m8_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i8m8_i16m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tu(vint32m1_t vd, vint16mf4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tu(vint32m1_t vd, vint16mf4_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i16mf4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i16mf2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m1_i32m1_tu(vint32m1_t vd, vint16m1_t vs2, vint32m1_t vs1, size_t vl) {
+vint32m1_t test_vwredsum_vs_i16m1_i32m1_tu(vint32m1_t vd, vint16m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i16m1_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m2_i32m1_tu(vint32m1_t vd, vint16m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vwredsum_vs_i16m2_i32m1_tu(vint32m1_t vd, vint16m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i16m2_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m4_i32m1_tu(vint32m1_t vd, vint16m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vwredsum_vs_i16m4_i32m1_tu(vint32m1_t vd, vint16m4_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i16m4_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m8_i32m1_tu(vint32m1_t vd, vint16m8_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vwredsum_vs_i16m8_i32m1_tu(vint32m1_t vd, vint16m8_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i16m8_i32m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i32mf2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m1_i64m1_tu(vint64m1_t vd, vint32m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vwredsum_vs_i32m1_i64m1_tu(vint64m1_t vd, vint32m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i32m1_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m2_i64m1_tu(vint64m1_t vd, vint32m2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vwredsum_vs_i32m2_i64m1_tu(vint64m1_t vd, vint32m2_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i32m2_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m4_i64m1_tu(vint64m1_t vd, vint32m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vwredsum_vs_i32m4_i64m1_tu(vint64m1_t vd, vint32m4_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i32m4_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m8_i64m1_tu(vint64m1_t vd, vint32m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vwredsum_vs_i32m8_i64m1_tu(vint64m1_t vd, vint32m8_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vwredsum_vs_i32m8_i64m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tum(vbool64_t vm, vint16m1_t vd, vint8mf8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8mf8_i16m1_tum(vbool64_t vm, vint16m1_t vd, + vint8mf8_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i8mf8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint8mf4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8mf4_i16m1_tum(vbool32_t vm, vint16m1_t vd, + vint8mf4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i8mf4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8mf2_i16m1_tum(vbool16_t vm, vint16m1_t vd, + vint8mf2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i8mf2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m1_i16m1_tum(vbool8_t vm, vint16m1_t vd, vint8m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8m1_i16m1_tum(vbool8_t vm, vint16m1_t vd, + vint8m1_t vs2, vint16m1_t vs1, + size_t vl) { return 
__riscv_vwredsum_vs_i8m1_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m2_i16m1_tum(vbool4_t vm, vint16m1_t vd, vint8m2_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8m2_i16m1_tum(vbool4_t vm, vint16m1_t vd, + vint8m2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i8m2_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m4_i16m1_tum(vbool2_t vm, vint16m1_t vd, vint8m4_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8m4_i16m1_tum(vbool2_t vm, vint16m1_t vd, + vint8m4_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i8m4_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwredsum_vs_i8m8_i16m1_tum(vbool1_t vm, vint16m1_t vd, vint8m8_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vwredsum_vs_i8m8_i16m1_tum(vbool1_t vm, vint16m1_t vd, + vint8m8_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i8m8_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint16mf4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vwredsum_vs_i16mf4_i32m1_tum(vbool64_t vm, vint32m1_t vd, + vint16mf4_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i16mf4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vwredsum_vs_i16mf2_i32m1_tum(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i16mf2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m1_i32m1_tum(vbool16_t vm, vint32m1_t vd, vint16m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vwredsum_vs_i16m1_i32m1_tum(vbool16_t vm, vint32m1_t vd, + vint16m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i16m1_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m2_i32m1_tum(vbool8_t vm, vint32m1_t vd, vint16m2_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vwredsum_vs_i16m2_i32m1_tum(vbool8_t vm, vint32m1_t vd, + vint16m2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i16m2_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m4_i32m1_tum(vbool4_t vm, vint32m1_t vd, vint16m4_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vwredsum_vs_i16m4_i32m1_tum(vbool4_t vm, vint32m1_t vd, + vint16m4_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i16m4_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwredsum_vs_i16m8_i32m1_tum(vbool2_t vm, vint32m1_t vd, vint16m8_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vwredsum_vs_i16m8_i32m1_tum(vbool2_t vm, vint32m1_t vd, + vint16m8_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i16m8_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vwredsum_vs_i32mf2_i64m1_tum(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i32mf2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m1_i64m1_tum(vbool32_t vm, vint64m1_t vd, vint32m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vwredsum_vs_i32m1_i64m1_tum(vbool32_t vm, vint64m1_t vd, + vint32m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i32m1_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m2_i64m1_tum(vbool16_t vm, vint64m1_t vd, vint32m2_t vs2, 
vint64m1_t vs1, size_t vl) { +vint64m1_t test_vwredsum_vs_i32m2_i64m1_tum(vbool16_t vm, vint64m1_t vd, + vint32m2_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i32m2_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m4_i64m1_tum(vbool8_t vm, vint64m1_t vd, vint32m4_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vwredsum_vs_i32m4_i64m1_tum(vbool8_t vm, vint64m1_t vd, + vint32m4_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i32m4_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwredsum_vs_i32m8_i64m1_tum(vbool4_t vm, vint64m1_t vd, vint32m8_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vwredsum_vs_i32m8_i64m1_tum(vbool4_t vm, vint64m1_t vd, + vint32m8_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vwredsum_vs_i32m8_i64m1_tum(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwredsumu.c b/auto-generated/policy_funcs/llvm-api-tests/vwredsumu.c index ddfc811ac..487d0a2d5 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vwredsumu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vwredsumu.c @@ -5,146 +5,200 @@ #include <riscv_vector.h> -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tu(vuint16m1_t vd, vuint8mf8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tu(vuint16m1_t vd, vuint8mf8_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u8mf8_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tu(vuint16m1_t vd, vuint8mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tu(vuint16m1_t vd, vuint8mf4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u8mf4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u8mf2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tu(vuint16m1_t vd, vuint8m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tu(vuint16m1_t vd, vuint8m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u8m1_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tu(vuint16m1_t vd, vuint8m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tu(vuint16m1_t vd, vuint8m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u8m2_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tu(vuint16m1_t vd, vuint8m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tu(vuint16m1_t vd, vuint8m4_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u8m4_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tu(vuint16m1_t vd, vuint8m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tu(vuint16m1_t vd, vuint8m8_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u8m8_u16m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tu(vuint32m1_t vd, vuint16mf4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tu(vuint32m1_t vd, vuint16mf4_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u16mf4_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t
test_vwredsumu_vs_u16mf2_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u16mf2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tu(vuint32m1_t vd, vuint16m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tu(vuint32m1_t vd, vuint16m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u16m1_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tu(vuint32m1_t vd, vuint16m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tu(vuint32m1_t vd, vuint16m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u16m2_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tu(vuint32m1_t vd, vuint16m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tu(vuint32m1_t vd, vuint16m4_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u16m4_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tu(vuint32m1_t vd, vuint16m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tu(vuint32m1_t vd, vuint16m8_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u16m8_u32m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u32mf2_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tu(vuint64m1_t vd, vuint32m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tu(vuint64m1_t vd, vuint32m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u32m1_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tu(vuint64m1_t vd, vuint32m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tu(vuint64m1_t vd, vuint32m2_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u32m2_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tu(vuint64m1_t vd, vuint32m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tu(vuint64m1_t vd, vuint32m4_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u32m4_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tu(vuint64m1_t vd, vuint32m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tu(vuint64m1_t vd, vuint32m8_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u32m8_u64m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tum(vbool64_t vm, vuint16m1_t vd, vuint8mf8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_tum(vbool64_t vm, vuint16m1_t vd, + vuint8mf8_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u8mf8_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tum(vbool32_t vm, vuint16m1_t vd, vuint8mf4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_tum(vbool32_t vm, vuint16m1_t vd, + vuint8mf4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u8mf4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_tum(vbool16_t vm, 
vuint16m1_t vd, + vuint8mf2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u8mf2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tum(vbool8_t vm, vuint16m1_t vd, vuint8m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_tum(vbool8_t vm, vuint16m1_t vd, + vuint8m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u8m1_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tum(vbool4_t vm, vuint16m1_t vd, vuint8m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_tum(vbool4_t vm, vuint16m1_t vd, + vuint8m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u8m2_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tum(vbool2_t vm, vuint16m1_t vd, vuint8m4_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_tum(vbool2_t vm, vuint16m1_t vd, + vuint8m4_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u8m4_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tum(vbool1_t vm, vuint16m1_t vd, vuint8m8_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_tum(vbool1_t vm, vuint16m1_t vd, + vuint8m8_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u8m8_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tum(vbool64_t vm, vuint32m1_t vd, vuint16mf4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_tum(vbool64_t vm, vuint32m1_t vd, + vuint16mf4_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u16mf4_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u16mf2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tum(vbool16_t vm, vuint32m1_t vd, vuint16m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_tum(vbool16_t vm, vuint32m1_t vd, + vuint16m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u16m1_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tum(vbool8_t vm, vuint32m1_t vd, vuint16m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_tum(vbool8_t vm, vuint32m1_t vd, + vuint16m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u16m2_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tum(vbool4_t vm, vuint32m1_t vd, vuint16m4_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_tum(vbool4_t vm, vuint32m1_t vd, + vuint16m4_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u16m4_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tum(vbool2_t vm, vuint32m1_t vd, vuint16m8_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_tum(vbool2_t vm, vuint32m1_t vd, + vuint16m8_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u16m8_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, + 
vuint64m1_t vs1, size_t vl) { return __riscv_vwredsumu_vs_u32mf2_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tum(vbool32_t vm, vuint64m1_t vd, vuint32m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_tum(vbool32_t vm, vuint64m1_t vd, + vuint32m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u32m1_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tum(vbool16_t vm, vuint64m1_t vd, vuint32m2_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_tum(vbool16_t vm, vuint64m1_t vd, + vuint32m2_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u32m2_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tum(vbool8_t vm, vuint64m1_t vd, vuint32m4_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_tum(vbool8_t vm, vuint64m1_t vd, + vuint32m4_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u32m4_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tum(vbool4_t vm, vuint64m1_t vd, vuint32m8_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_tum(vbool4_t vm, vuint64m1_t vd, + vuint32m8_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vwredsumu_vs_u32m8_u64m1_tum(vm, vd, vs2, vs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwsub.c b/auto-generated/policy_funcs/llvm-api-tests/vwsub.c index f48b90d1f..a7d33ee79 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vwsub.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vwsub.c @@ -5,962 +5,1220 @@ #include <riscv_vector.h> -vint16mf4_t test_vwsub_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwsub_vv_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vwsub_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwsub_vx_i16mf4_tu(vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwsub_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwsub_wv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vwsub_wv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwsub_wx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwsub_wx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwsub_vv_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vwsub_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwsub_vx_i16mf2_tu(vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwsub_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_wv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwsub_wv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vwsub_wv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_wx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t 
test_vwsub_wx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwsub_wx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwsub_vv_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vwsub_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwsub_vx_i16m1_tu(vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwsub_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_wv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwsub_wv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vwsub_wv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_wx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwsub_wx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwsub_wx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwsub_vv_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vwsub_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwsub_vx_i16m2_tu(vint16m2_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwsub_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_wv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwsub_wv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vwsub_wv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_wx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwsub_wx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwsub_wx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwsub_vv_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vwsub_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwsub_vx_i16m4_tu(vint16m4_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwsub_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_wv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwsub_wv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vwsub_wv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_wx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwsub_wx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwsub_wx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwsub_vv_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vwsub_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwsub_vx_i16m8_tu(vint16m8_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwsub_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_wv_i16m8_tu(vint16m8_t vd, 
vint16m8_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwsub_wv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vwsub_wv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_wx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwsub_wx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vwsub_wx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwsub_vv_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vwsub_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwsub_vx_i32mf2_tu(vint32mf2_t vd, vint16mf4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_wv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwsub_wv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vwsub_wv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_wx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwsub_wx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwsub_vv_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwsub_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwsub_vx_i32m1_tu(vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwsub_vx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_wv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwsub_wv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_wx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwsub_wx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwsub_wx_i32m1_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwsub_vv_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vwsub_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwsub_vx_i32m2_tu(vint32m2_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwsub_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_wv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwsub_wv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vwsub_wv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_wx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwsub_wx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwsub_wx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_vv_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwsub_vv_i32m4_tu(vint32m4_t vd, vint16m2_t 
vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vwsub_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwsub_vx_i32m4_tu(vint32m4_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwsub_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_wv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwsub_wv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vwsub_wv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_wx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwsub_wx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwsub_wx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwsub_vv_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vwsub_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwsub_vx_i32m8_tu(vint32m8_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwsub_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwsub_wv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vwsub_wv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwsub_wx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vwsub_wx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwsub_vv_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwsub_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwsub_vx_i64m1_tu(vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwsub_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_wv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwsub_wv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwsub_wx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwsub_wx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwsub_vv_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vwsub_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwsub_vx_i64m2_tu(vint64m2_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwsub_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwsub_wv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vwsub_wv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2_tu(vint64m2_t vd, 
vint64m2_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwsub_wx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwsub_wx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwsub_vv_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vwsub_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwsub_vx_i64m4_tu(vint64m4_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwsub_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwsub_wv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vwsub_wv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwsub_wx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwsub_wx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwsub_vv_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vwsub_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwsub_vx_i64m8_tu(vint64m8_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwsub_vx_i64m8_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_wv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwsub_wv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vwsub_wv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_wx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwsub_wx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vwsub_wx_i64m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwsub_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vwsub_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwsub_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwsub_wv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vwsub_wv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwsub_wx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwsub_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vwsub_vv_i16mf2_tum(vm, vd, 
vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwsub_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwsub_wv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vwsub_wv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwsub_wx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwsub_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwsub_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwsub_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwsub_wv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwsub_wv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwsub_wx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwsub_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwsub_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwsub_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwsub_wv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwsub_wv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwsub_wx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwsub_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwsub_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwsub_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) 
{ return __riscv_vwsub_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwsub_wv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwsub_wv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwsub_wx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwsub_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwsub_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwsub_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_wv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwsub_wv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwsub_wv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_wx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwsub_wx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwsub_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwsub_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwsub_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwsub_wv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwsub_wv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwsub_wx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwsub_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwsub_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwsub_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t 
test_vwsub_wv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwsub_wx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwsub_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwsub_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwsub_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwsub_wv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwsub_wx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwsub_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwsub_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwsub_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwsub_wv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwsub_wx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwsub_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwsub_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwsub_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwsub_wv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8_tum(vbool4_t vm, vint32m8_t vd, 
vint32m8_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwsub_wx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwsub_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwsub_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwsub_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_wv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwsub_wv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwsub_wx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_wx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwsub_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwsub_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwsub_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwsub_wv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwsub_wx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_wx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwsub_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwsub_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwsub_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwsub_wv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwsub_wx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_wx_i64m4_tum(vm, vd, vs2, rs1, vl); } 
-vint64m8_t test_vwsub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwsub_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwsub_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwsub_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_wv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwsub_wv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_wx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwsub_wx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_wx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwsub_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vwsub_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwsub_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwsub_wv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vwsub_wv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwsub_wx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwsub_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vwsub_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwsub_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwsub_wv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vwsub_wv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwsub_wx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t 
test_vwsub_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwsub_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwsub_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwsub_wv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwsub_wv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwsub_wx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwsub_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwsub_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwsub_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwsub_wv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwsub_wv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwsub_wx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwsub_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwsub_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwsub_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwsub_wv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwsub_wv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwsub_wx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwsub_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwsub_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, 
vint8m4_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwsub_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_wv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwsub_wv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwsub_wv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_wx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwsub_wx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwsub_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwsub_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwsub_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwsub_wv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwsub_wv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwsub_wx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwsub_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vwsub_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwsub_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwsub_wv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwsub_wx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwsub_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwsub_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwsub_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + int16_t rs1, 
size_t vl) { return __riscv_vwsub_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwsub_wv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwsub_wx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwsub_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwsub_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwsub_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwsub_wv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwsub_wx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwsub_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwsub_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwsub_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwsub_wv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwsub_wx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwsub_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vwsub_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwsub_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vwsub_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_wv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { 
+vint64m1_t test_vwsub_wv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwsub_wx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_wx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwsub_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwsub_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwsub_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwsub_wv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwsub_wx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_wx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwsub_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwsub_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwsub_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwsub_wv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwsub_wx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_wx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwsub_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwsub_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwsub_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_wv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwsub_wv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t 
test_vwsub_wx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwsub_wx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_wx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwsub_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { return __riscv_vwsub_vv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwsub_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint8mf8_t vs2, int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vwsub_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint8mf8_t vs1, size_t vl) { +vint16mf4_t test_vwsub_wv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vwsub_wv_i16mf4_mu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vwsub_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf4_t test_vwsub_wx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwsub_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { return __riscv_vwsub_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwsub_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint8mf4_t vs2, int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vwsub_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint8mf4_t vs1, size_t vl) { +vint16mf2_t test_vwsub_wv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vwsub_wv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vwsub_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int8_t rs1, size_t vl) { +vint16mf2_t test_vwsub_wx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwsub_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwsub_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwsub_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vwsub_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint8mf2_t vs1, size_t vl) { +vint16m1_t test_vwsub_wv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vwsub_wv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vwsub_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int8_t rs1, size_t vl) { +vint16m1_t test_vwsub_wx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int8_t rs1, 
size_t vl) { return __riscv_vwsub_wx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwsub_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwsub_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwsub_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vwsub_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint8m1_t vs1, size_t vl) { +vint16m2_t test_vwsub_wv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vwsub_wv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vwsub_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int8_t rs1, size_t vl) { +vint16m2_t test_vwsub_wx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwsub_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwsub_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwsub_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vwsub_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint8m2_t vs1, size_t vl) { +vint16m4_t test_vwsub_wv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vwsub_wv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vwsub_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int8_t rs1, size_t vl) { +vint16m4_t test_vwsub_wx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwsub_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwsub_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwsub_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vwsub_wv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint8m4_t vs1, size_t vl) { +vint16m8_t test_vwsub_wv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vwsub_wv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vwsub_wx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int8_t rs1, size_t vl) { +vint16m8_t test_vwsub_wx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vwsub_wx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwsub_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return 
__riscv_vwsub_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwsub_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vwsub_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint16mf4_t vs1, size_t vl) { +vint32mf2_t test_vwsub_wv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vwsub_wv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vwsub_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int16_t rs1, size_t vl) { +vint32mf2_t test_vwsub_wx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwsub_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwsub_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwsub_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint16mf2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vwsub_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint16mf2_t vs1, size_t vl) { +vint32m1_t test_vwsub_wv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vwsub_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int16_t rs1, size_t vl) { +vint32m1_t test_vwsub_wx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwsub_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwsub_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwsub_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vwsub_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint16m1_t vs1, size_t vl) { +vint32m2_t test_vwsub_wv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vwsub_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int16_t rs1, size_t vl) { +vint32m2_t test_vwsub_wx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwsub_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwsub_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwsub_vx_i32m4_mu(vbool8_t vm, vint32m4_t 
vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vwsub_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint16m2_t vs1, size_t vl) { +vint32m4_t test_vwsub_wv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vwsub_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int16_t rs1, size_t vl) { +vint32m4_t test_vwsub_wx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwsub_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwsub_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwsub_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vwsub_wv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint16m4_t vs1, size_t vl) { +vint32m8_t test_vwsub_wv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vwsub_wv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vwsub_wx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int16_t rs1, size_t vl) { +vint32m8_t test_vwsub_wx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vwsub_wx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwsub_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwsub_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwsub_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint32mf2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vwsub_wv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint32mf2_t vs1, size_t vl) { +vint64m1_t test_vwsub_wv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vwsub_wx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int32_t rs1, size_t vl) { +vint64m1_t test_vwsub_wx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_wx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t test_vwsub_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwsub_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwsub_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vwsub_wv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint32m1_t vs1, size_t vl) { +vint64m2_t 
test_vwsub_wv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vwsub_wx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int32_t rs1, size_t vl) { +vint64m2_t test_vwsub_wx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_wx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwsub_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwsub_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwsub_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vwsub_wv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint32m2_t vs1, size_t vl) { +vint64m4_t test_vwsub_wv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vwsub_wx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int32_t rs1, size_t vl) { +vint64m4_t test_vwsub_wx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_wx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwsub_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwsub_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwsub_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vwsub_wv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint32m4_t vs1, size_t vl) { +vint64m8_t test_vwsub_wv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vwsub_wv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vwsub_wx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int32_t rs1, size_t vl) { +vint64m8_t test_vwsub_wx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vwsub_wx_i64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vwsubu.c b/auto-generated/policy_funcs/llvm-api-tests/vwsubu.c index f05fdf3bd..11fdc7f11 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vwsubu.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vwsubu.c @@ -5,962 +5,1323 @@ #include <riscv_vector.h> -vuint16mf4_t test_vwsubu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsubu_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vwsubu_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwsubu_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) {
+vuint16mf4_t test_vwsubu_wv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vwsubu_wv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_wx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwsubu_wx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsubu_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vwsubu_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwsubu_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_wv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsubu_wv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vwsubu_wv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_wx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwsubu_wx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsubu_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vwsubu_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwsubu_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsubu_wv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vwsubu_wv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwsubu_wx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsubu_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vwsubu_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwsubu_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsubu_wv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vwsubu_wv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwsubu_wx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, 
vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsubu_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vwsubu_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwsubu_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsubu_wv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vwsubu_wv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwsubu_wx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsubu_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vwsubu_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwsubu_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsubu_wv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vwsubu_wv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwsubu_wx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsubu_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vwsubu_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwsubu_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwsubu_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsubu_wv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vwsubu_wv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwsubu_wx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsubu_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vwsubu_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwsubu_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwsubu_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m1_t 
test_vwsubu_wv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsubu_wv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vwsubu_wv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwsubu_wx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsubu_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwsubu_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwsubu_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwsubu_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsubu_wv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vwsubu_wv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwsubu_wx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsubu_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vwsubu_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwsubu_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwsubu_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsubu_wv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vwsubu_wv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwsubu_wx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsubu_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vwsubu_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwsubu_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwsubu_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsubu_wv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vwsubu_wv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwsubu_wx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32m8_tu(vd, vs2, rs1, vl); } 
-vuint64m1_t test_vwsubu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsubu_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vwsubu_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwsubu_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwsubu_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsubu_wv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vwsubu_wv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwsubu_wx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwsubu_wx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsubu_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwsubu_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwsubu_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwsubu_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsubu_wv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vwsubu_wv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwsubu_wx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwsubu_wx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsubu_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vwsubu_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwsubu_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwsubu_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsubu_wv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vwsubu_wv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_wx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwsubu_wx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwsubu_wx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsubu_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vwsubu_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwsubu_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwsubu_vx_u64m8_tu(vd, vs2, 
rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsubu_wv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vwsubu_wv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwsubu_wx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vwsubu_wx_u64m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsubu_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwsubu_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsubu_wv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwsubu_wx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsubu_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwsubu_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsubu_wv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwsubu_wx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsubu_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwsubu_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, 
vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsubu_wv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwsubu_wx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsubu_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwsubu_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsubu_wv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwsubu_wx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsubu_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwsubu_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsubu_wv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwsubu_wx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsubu_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwsubu_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsubu_wv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t 
vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwsubu_wx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsubu_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwsubu_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsubu_wv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwsubu_wx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsubu_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwsubu_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsubu_wv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwsubu_wx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsubu_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwsubu_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsubu_wv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + 
size_t vl) { return __riscv_vwsubu_wv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwsubu_wx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsubu_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwsubu_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsubu_wv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwsubu_wx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsubu_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwsubu_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsubu_wv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwsubu_wx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsubu_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwsubu_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsubu_wv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u64m1_tum(vm, vd, vs2, vs1, vl); } 
-vuint64m1_t test_vwsubu_wx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwsubu_wx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_wx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsubu_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwsubu_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsubu_wv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwsubu_wx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_wx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsubu_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwsubu_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsubu_wv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_wx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwsubu_wx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_wx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsubu_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwsubu_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsubu_wv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t 
vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwsubu_wx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_wx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsubu_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwsubu_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsubu_wv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwsubu_wx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsubu_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwsubu_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsubu_wv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwsubu_wx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsubu_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwsubu_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsubu_wv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1_tumu(vbool16_t vm, 
vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwsubu_wx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsubu_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwsubu_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsubu_wv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwsubu_wx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsubu_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwsubu_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsubu_wv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwsubu_wx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsubu_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwsubu_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsubu_wv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t 
test_vwsubu_wx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsubu_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwsubu_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsubu_wv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwsubu_wx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsubu_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwsubu_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsubu_wv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwsubu_wx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsubu_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwsubu_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsubu_wv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t 
vl) { +vuint32m2_t test_vwsubu_wx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsubu_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwsubu_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsubu_wv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwsubu_wx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsubu_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwsubu_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsubu_wv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwsubu_wx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsubu_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwsubu_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsubu_wv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t 
test_vwsubu_wx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsubu_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwsubu_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsubu_wv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwsubu_wx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsubu_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwsubu_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsubu_wv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_wx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwsubu_wx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsubu_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwsubu_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsubu_wv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t 
test_vwsubu_wx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsubu_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwsubu_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vwsubu_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint16mf4_t test_vwsubu_wv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vwsubu_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf4_t test_vwsubu_wx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsubu_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwsubu_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vwsubu_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint16mf2_t test_vwsubu_wv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vwsubu_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16mf2_t test_vwsubu_wx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsubu_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwsubu_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vwsubu_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint16m1_t test_vwsubu_wv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vwsubu_wx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m1_t test_vwsubu_wx_u16m1_mu(vbool16_t vm, 
vuint16m1_t vd, + vuint16m1_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsubu_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vwsubu_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwsubu_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vwsubu_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint8m1_t vs1, size_t vl) { +vuint16m2_t test_vwsubu_wv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vwsubu_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m2_t test_vwsubu_wx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsubu_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vwsubu_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwsubu_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vwsubu_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint8m2_t vs1, size_t vl) { +vuint16m4_t test_vwsubu_wv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vwsubu_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m4_t test_vwsubu_wx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsubu_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vwsubu_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwsubu_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vwsubu_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vwsubu_wv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint8m4_t vs1, size_t vl) { +vuint16m8_t test_vwsubu_wv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vwsubu_wx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint8_t rs1, size_t vl) { +vuint16m8_t test_vwsubu_wx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vwsubu_wx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_vv_u32mf2_mu(vbool64_t vm, 
vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsubu_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwsubu_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vwsubu_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint32mf2_t test_vwsubu_wv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vwsubu_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32mf2_t test_vwsubu_wx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vwsubu_wx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsubu_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwsubu_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vwsubu_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint32m1_t test_vwsubu_wv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vwsubu_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m1_t test_vwsubu_wx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsubu_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwsubu_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vwsubu_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint16m1_t vs1, size_t vl) { +vuint32m2_t test_vwsubu_wv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vwsubu_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m2_t test_vwsubu_wx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) 
{ +vuint32m4_t test_vwsubu_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwsubu_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vwsubu_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint16m2_t vs1, size_t vl) { +vuint32m4_t test_vwsubu_wv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint16m2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vwsubu_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m4_t test_vwsubu_wx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsubu_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwsubu_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vwsubu_wv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint16m4_t vs1, size_t vl) { +vuint32m8_t test_vwsubu_wv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vwsubu_wx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint16_t rs1, size_t vl) { +vuint32m8_t test_vwsubu_wx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vwsubu_wx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsubu_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwsubu_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vwsubu_wv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint64m1_t test_vwsubu_wv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vwsubu_wx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m1_t test_vwsubu_wx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_wx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsubu_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) 
{ return __riscv_vwsubu_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwsubu_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vwsubu_wv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint32m1_t vs1, size_t vl) { +vuint64m2_t test_vwsubu_wv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vwsubu_wx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m2_t test_vwsubu_wx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_wx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsubu_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwsubu_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vwsubu_wv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint32m2_t vs1, size_t vl) { +vuint64m4_t test_vwsubu_wv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vwsubu_wx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m4_t test_vwsubu_wx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_wx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsubu_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwsubu_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwsubu_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vwsubu_wv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint32m4_t vs1, size_t vl) { +vuint64m8_t test_vwsubu_wv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vwsubu_wv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vwsubu_wx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint32_t rs1, size_t vl) { +vuint64m8_t test_vwsubu_wx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vwsubu_wx_u64m8_mu(vm, vd, vs2, rs1, vl); }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vxor.c b/auto-generated/policy_funcs/llvm-api-tests/vxor.c
index 84fd40fa4..e14c94abf 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vxor.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vxor.c
@@ -5,1410 +5,1810 @@
 #include <riscv_vector.h>
-vint8mf8_t test_vxor_vv_i8mf8_tu(vint8mf8_t vd,
vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vxor_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, + size_t vl) { return __riscv_vxor_vv_i8mf8_tu(vd, vs2, vs1, vl); } -vint8mf8_t test_vxor_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vxor_vx_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vxor_vx_i8mf8_tu(vd, vs2, rs1, vl); } -vint8mf4_t test_vxor_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vxor_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, + size_t vl) { return __riscv_vxor_vv_i8mf4_tu(vd, vs2, vs1, vl); } -vint8mf4_t test_vxor_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vxor_vx_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vxor_vx_i8mf4_tu(vd, vs2, rs1, vl); } -vint8mf2_t test_vxor_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vxor_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_i8mf2_tu(vd, vs2, vs1, vl); } -vint8mf2_t test_vxor_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vxor_vx_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vxor_vx_i8mf2_tu(vd, vs2, rs1, vl); } -vint8m1_t test_vxor_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vxor_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, + size_t vl) { return __riscv_vxor_vv_i8m1_tu(vd, vs2, vs1, vl); } -vint8m1_t test_vxor_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vxor_vx_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, int8_t rs1, + size_t vl) { return __riscv_vxor_vx_i8m1_tu(vd, vs2, rs1, vl); } -vint8m2_t test_vxor_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vxor_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, + size_t vl) { return __riscv_vxor_vv_i8m2_tu(vd, vs2, vs1, vl); } -vint8m2_t test_vxor_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vxor_vx_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, int8_t rs1, + size_t vl) { return __riscv_vxor_vx_i8m2_tu(vd, vs2, rs1, vl); } -vint8m4_t test_vxor_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vxor_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, + size_t vl) { return __riscv_vxor_vv_i8m4_tu(vd, vs2, vs1, vl); } -vint8m4_t test_vxor_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vxor_vx_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, int8_t rs1, + size_t vl) { return __riscv_vxor_vx_i8m4_tu(vd, vs2, rs1, vl); } -vint8m8_t test_vxor_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vxor_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, + size_t vl) { return __riscv_vxor_vv_i8m8_tu(vd, vs2, vs1, vl); } -vint8m8_t test_vxor_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vxor_vx_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, int8_t rs1, + size_t vl) { return __riscv_vxor_vx_i8m8_tu(vd, vs2, rs1, vl); } -vint16mf4_t test_vxor_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vxor_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, + vint16mf4_t vs1, size_t vl) { return __riscv_vxor_vv_i16mf4_tu(vd, vs2, vs1, vl); } -vint16mf4_t test_vxor_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, 
size_t vl) { +vint16mf4_t test_vxor_vx_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vxor_vx_i16mf4_tu(vd, vs2, rs1, vl); } -vint16mf2_t test_vxor_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vxor_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, + vint16mf2_t vs1, size_t vl) { return __riscv_vxor_vv_i16mf2_tu(vd, vs2, vs1, vl); } -vint16mf2_t test_vxor_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vxor_vx_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vxor_vx_i16mf2_tu(vd, vs2, rs1, vl); } -vint16m1_t test_vxor_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vxor_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, + size_t vl) { return __riscv_vxor_vv_i16m1_tu(vd, vs2, vs1, vl); } -vint16m1_t test_vxor_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vxor_vx_i16m1_tu(vint16m1_t vd, vint16m1_t vs2, int16_t rs1, + size_t vl) { return __riscv_vxor_vx_i16m1_tu(vd, vs2, rs1, vl); } -vint16m2_t test_vxor_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vxor_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, + size_t vl) { return __riscv_vxor_vv_i16m2_tu(vd, vs2, vs1, vl); } -vint16m2_t test_vxor_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vxor_vx_i16m2_tu(vint16m2_t vd, vint16m2_t vs2, int16_t rs1, + size_t vl) { return __riscv_vxor_vx_i16m2_tu(vd, vs2, rs1, vl); } -vint16m4_t test_vxor_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vxor_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, + size_t vl) { return __riscv_vxor_vv_i16m4_tu(vd, vs2, vs1, vl); } -vint16m4_t test_vxor_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vxor_vx_i16m4_tu(vint16m4_t vd, vint16m4_t vs2, int16_t rs1, + size_t vl) { return __riscv_vxor_vx_i16m4_tu(vd, vs2, rs1, vl); } -vint16m8_t test_vxor_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vxor_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, + size_t vl) { return __riscv_vxor_vv_i16m8_tu(vd, vs2, vs1, vl); } -vint16m8_t test_vxor_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vxor_vx_i16m8_tu(vint16m8_t vd, vint16m8_t vs2, int16_t rs1, + size_t vl) { return __riscv_vxor_vx_i16m8_tu(vd, vs2, rs1, vl); } -vint32mf2_t test_vxor_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vxor_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, + vint32mf2_t vs1, size_t vl) { return __riscv_vxor_vv_i32mf2_tu(vd, vs2, vs1, vl); } -vint32mf2_t test_vxor_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vxor_vx_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vxor_vx_i32mf2_tu(vd, vs2, rs1, vl); } -vint32m1_t test_vxor_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vxor_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, + size_t vl) { return __riscv_vxor_vv_i32m1_tu(vd, vs2, vs1, vl); } -vint32m1_t test_vxor_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vxor_vx_i32m1_tu(vint32m1_t vd, vint32m1_t vs2, int32_t rs1, + size_t vl) { return __riscv_vxor_vx_i32m1_tu(vd, vs2, 
rs1, vl); } -vint32m2_t test_vxor_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vxor_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, + size_t vl) { return __riscv_vxor_vv_i32m2_tu(vd, vs2, vs1, vl); } -vint32m2_t test_vxor_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vxor_vx_i32m2_tu(vint32m2_t vd, vint32m2_t vs2, int32_t rs1, + size_t vl) { return __riscv_vxor_vx_i32m2_tu(vd, vs2, rs1, vl); } -vint32m4_t test_vxor_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vxor_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, + size_t vl) { return __riscv_vxor_vv_i32m4_tu(vd, vs2, vs1, vl); } -vint32m4_t test_vxor_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vxor_vx_i32m4_tu(vint32m4_t vd, vint32m4_t vs2, int32_t rs1, + size_t vl) { return __riscv_vxor_vx_i32m4_tu(vd, vs2, rs1, vl); } -vint32m8_t test_vxor_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vxor_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, + size_t vl) { return __riscv_vxor_vv_i32m8_tu(vd, vs2, vs1, vl); } -vint32m8_t test_vxor_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vxor_vx_i32m8_tu(vint32m8_t vd, vint32m8_t vs2, int32_t rs1, + size_t vl) { return __riscv_vxor_vx_i32m8_tu(vd, vs2, rs1, vl); } -vint64m1_t test_vxor_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vxor_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, + size_t vl) { return __riscv_vxor_vv_i64m1_tu(vd, vs2, vs1, vl); } -vint64m1_t test_vxor_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vxor_vx_i64m1_tu(vint64m1_t vd, vint64m1_t vs2, int64_t rs1, + size_t vl) { return __riscv_vxor_vx_i64m1_tu(vd, vs2, rs1, vl); } -vint64m2_t test_vxor_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vxor_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, + size_t vl) { return __riscv_vxor_vv_i64m2_tu(vd, vs2, vs1, vl); } -vint64m2_t test_vxor_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vxor_vx_i64m2_tu(vint64m2_t vd, vint64m2_t vs2, int64_t rs1, + size_t vl) { return __riscv_vxor_vx_i64m2_tu(vd, vs2, rs1, vl); } -vint64m4_t test_vxor_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vxor_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, + size_t vl) { return __riscv_vxor_vv_i64m4_tu(vd, vs2, vs1, vl); } -vint64m4_t test_vxor_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vxor_vx_i64m4_tu(vint64m4_t vd, vint64m4_t vs2, int64_t rs1, + size_t vl) { return __riscv_vxor_vx_i64m4_tu(vd, vs2, rs1, vl); } -vint64m8_t test_vxor_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vxor_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, + size_t vl) { return __riscv_vxor_vv_i64m8_tu(vd, vs2, vs1, vl); } -vint64m8_t test_vxor_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vxor_vx_i64m8_tu(vint64m8_t vd, vint64m8_t vs2, int64_t rs1, + size_t vl) { return __riscv_vxor_vx_i64m8_tu(vd, vs2, rs1, vl); } -vuint8mf8_t test_vxor_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vxor_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t 
vs1, size_t vl) { return __riscv_vxor_vv_u8mf8_tu(vd, vs2, vs1, vl); } -vuint8mf8_t test_vxor_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vxor_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vxor_vx_u8mf8_tu(vd, vs2, rs1, vl); } -vuint8mf4_t test_vxor_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vxor_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vxor_vv_u8mf4_tu(vd, vs2, vs1, vl); } -vuint8mf4_t test_vxor_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vxor_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vxor_vx_u8mf4_tu(vd, vs2, rs1, vl); } -vuint8mf2_t test_vxor_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vxor_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vxor_vv_u8mf2_tu(vd, vs2, vs1, vl); } -vuint8mf2_t test_vxor_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vxor_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vxor_vx_u8mf2_tu(vd, vs2, rs1, vl); } -vuint8m1_t test_vxor_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vxor_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { return __riscv_vxor_vv_u8m1_tu(vd, vs2, vs1, vl); } -vuint8m1_t test_vxor_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vxor_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vxor_vx_u8m1_tu(vd, vs2, rs1, vl); } -vuint8m2_t test_vxor_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vxor_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { return __riscv_vxor_vv_u8m2_tu(vd, vs2, vs1, vl); } -vuint8m2_t test_vxor_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vxor_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vxor_vx_u8m2_tu(vd, vs2, rs1, vl); } -vuint8m4_t test_vxor_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vxor_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { return __riscv_vxor_vv_u8m4_tu(vd, vs2, vs1, vl); } -vuint8m4_t test_vxor_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vxor_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vxor_vx_u8m4_tu(vd, vs2, rs1, vl); } -vuint8m8_t test_vxor_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vxor_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { return __riscv_vxor_vv_u8m8_tu(vd, vs2, vs1, vl); } -vuint8m8_t test_vxor_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vxor_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { return __riscv_vxor_vx_u8m8_tu(vd, vs2, rs1, vl); } -vuint16mf4_t test_vxor_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vxor_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { return __riscv_vxor_vv_u16mf4_tu(vd, vs2, vs1, vl); } -vuint16mf4_t test_vxor_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t 
vl) { +vuint16mf4_t test_vxor_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16mf4_tu(vd, vs2, rs1, vl); } -vuint16mf2_t test_vxor_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vxor_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { return __riscv_vxor_vv_u16mf2_tu(vd, vs2, vs1, vl); } -vuint16mf2_t test_vxor_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vxor_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16mf2_tu(vd, vs2, rs1, vl); } -vuint16m1_t test_vxor_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vxor_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vxor_vv_u16m1_tu(vd, vs2, vs1, vl); } -vuint16m1_t test_vxor_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vxor_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vxor_vx_u16m1_tu(vd, vs2, rs1, vl); } -vuint16m2_t test_vxor_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vxor_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vxor_vv_u16m2_tu(vd, vs2, vs1, vl); } -vuint16m2_t test_vxor_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vxor_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vxor_vx_u16m2_tu(vd, vs2, rs1, vl); } -vuint16m4_t test_vxor_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vxor_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vxor_vv_u16m4_tu(vd, vs2, vs1, vl); } -vuint16m4_t test_vxor_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vxor_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vxor_vx_u16m4_tu(vd, vs2, rs1, vl); } -vuint16m8_t test_vxor_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vxor_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vxor_vv_u16m8_tu(vd, vs2, vs1, vl); } -vuint16m8_t test_vxor_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vxor_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vxor_vx_u16m8_tu(vd, vs2, rs1, vl); } -vuint32mf2_t test_vxor_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vxor_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { return __riscv_vxor_vv_u32mf2_tu(vd, vs2, vs1, vl); } -vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vxor_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32mf2_tu(vd, vs2, rs1, vl); } -vuint32m1_t test_vxor_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vxor_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vxor_vv_u32m1_tu(vd, vs2, vs1, vl); } -vuint32m1_t test_vxor_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t 
test_vxor_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vxor_vx_u32m1_tu(vd, vs2, rs1, vl); } -vuint32m2_t test_vxor_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vxor_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vxor_vv_u32m2_tu(vd, vs2, vs1, vl); } -vuint32m2_t test_vxor_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vxor_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vxor_vx_u32m2_tu(vd, vs2, rs1, vl); } -vuint32m4_t test_vxor_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vxor_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vxor_vv_u32m4_tu(vd, vs2, vs1, vl); } -vuint32m4_t test_vxor_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vxor_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vxor_vx_u32m4_tu(vd, vs2, rs1, vl); } -vuint32m8_t test_vxor_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vxor_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vxor_vv_u32m8_tu(vd, vs2, vs1, vl); } -vuint32m8_t test_vxor_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vxor_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vxor_vx_u32m8_tu(vd, vs2, rs1, vl); } -vuint64m1_t test_vxor_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vxor_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vxor_vv_u64m1_tu(vd, vs2, vs1, vl); } -vuint64m1_t test_vxor_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vxor_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vxor_vx_u64m1_tu(vd, vs2, rs1, vl); } -vuint64m2_t test_vxor_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vxor_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vxor_vv_u64m2_tu(vd, vs2, vs1, vl); } -vuint64m2_t test_vxor_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vxor_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vxor_vx_u64m2_tu(vd, vs2, rs1, vl); } -vuint64m4_t test_vxor_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vxor_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vxor_vv_u64m4_tu(vd, vs2, vs1, vl); } -vuint64m4_t test_vxor_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vxor_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { return __riscv_vxor_vx_u64m4_tu(vd, vs2, rs1, vl); } -vuint64m8_t test_vxor_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vxor_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vxor_vv_u64m8_tu(vd, vs2, vs1, vl); } -vuint64m8_t test_vxor_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vxor_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, + size_t vl) 
{ return __riscv_vxor_vx_u64m8_tu(vd, vs2, rs1, vl); } -vint8mf8_t test_vxor_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vxor_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vxor_vv_i8mf8_tum(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vxor_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vxor_vx_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8mf8_tum(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vxor_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vxor_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vxor_vv_i8mf4_tum(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vxor_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vxor_vx_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8mf4_tum(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vxor_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vxor_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vxor_vv_i8mf2_tum(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vxor_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vxor_vx_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8mf2_tum(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vxor_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vxor_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vxor_vv_i8m1_tum(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vxor_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vxor_vx_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8m1_tum(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vxor_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vxor_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vxor_vv_i8m2_tum(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vxor_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vxor_vx_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8m2_tum(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vxor_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vxor_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vxor_vv_i8m4_tum(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vxor_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vxor_vx_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8m4_tum(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vxor_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vxor_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vxor_vv_i8m8_tum(vm, vd, vs2, vs1, vl); } -vint8m8_t 
test_vxor_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vxor_vx_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8m8_tum(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vxor_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vxor_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vxor_vv_i16mf4_tum(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vxor_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vxor_vx_i16mf4_tum(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16mf4_tum(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vxor_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vxor_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_i16mf2_tum(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vxor_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vxor_vx_i16mf2_tum(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16mf2_tum(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vxor_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vxor_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vxor_vv_i16m1_tum(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vxor_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vxor_vx_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16m1_tum(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vxor_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vxor_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vxor_vv_i16m2_tum(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vxor_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vxor_vx_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16m2_tum(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vxor_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vxor_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vxor_vv_i16m4_tum(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vxor_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vxor_vx_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16m4_tum(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vxor_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vxor_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vxor_vv_i16m8_tum(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vxor_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vxor_vx_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return 
__riscv_vxor_vx_i16m8_tum(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vxor_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vxor_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_i32mf2_tum(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vxor_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vxor_vx_i32mf2_tum(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32mf2_tum(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vxor_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vxor_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vxor_vv_i32m1_tum(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vxor_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vxor_vx_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32m1_tum(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vxor_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vxor_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vxor_vv_i32m2_tum(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vxor_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vxor_vx_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32m2_tum(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vxor_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vxor_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vxor_vv_i32m4_tum(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vxor_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vxor_vx_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32m4_tum(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vxor_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vxor_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vxor_vv_i32m8_tum(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vxor_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vxor_vx_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32m8_tum(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vxor_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vxor_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vxor_vv_i64m1_tum(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vxor_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vxor_vx_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vxor_vx_i64m1_tum(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vxor_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vxor_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, 
+ vint64m2_t vs1, size_t vl) { return __riscv_vxor_vv_i64m2_tum(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vxor_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vxor_vx_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vxor_vx_i64m2_tum(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vxor_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vxor_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vxor_vv_i64m4_tum(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vxor_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vxor_vx_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vxor_vx_i64m4_tum(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vxor_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vxor_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vxor_vv_i64m8_tum(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vxor_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vxor_vx_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vxor_vx_i64m8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vxor_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vxor_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vxor_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vxor_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vxor_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vxor_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vxor_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vxor_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vxor_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vxor_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vxor_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vxor_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vxor_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vxor_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vxor_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vxor_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vxor_vv_u8m1_tum(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vxor_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t 
test_vxor_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8m1_tum(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vxor_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vxor_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vxor_vv_u8m2_tum(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vxor_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vxor_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8m2_tum(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vxor_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vxor_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vxor_vv_u8m4_tum(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vxor_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vxor_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8m4_tum(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vxor_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vxor_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vxor_vv_u8m8_tum(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vxor_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vxor_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8m8_tum(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vxor_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vxor_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vxor_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vxor_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vxor_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vxor_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vxor_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vxor_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vxor_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vxor_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vxor_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vxor_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vxor_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vxor_vv_u16m1_tum(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vxor_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vxor_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16m1_tum(vm, vd, vs2, rs1, vl); } -vuint16m2_t 
test_vxor_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vxor_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vxor_vv_u16m2_tum(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vxor_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vxor_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16m2_tum(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vxor_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vxor_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vxor_vv_u16m4_tum(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vxor_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vxor_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16m4_tum(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vxor_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vxor_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vxor_vv_u16m8_tum(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vxor_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vxor_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16m8_tum(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vxor_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vxor_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vxor_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vxor_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vxor_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vxor_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vxor_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vxor_vv_u32m1_tum(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vxor_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vxor_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32m1_tum(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vxor_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vxor_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vxor_vv_u32m2_tum(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vxor_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vxor_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32m2_tum(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vxor_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t 
test_vxor_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vxor_vv_u32m4_tum(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vxor_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vxor_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32m4_tum(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vxor_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vxor_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vxor_vv_u32m8_tum(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vxor_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vxor_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32m8_tum(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vxor_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vxor_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vxor_vv_u64m1_tum(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vxor_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vxor_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vxor_vx_u64m1_tum(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vxor_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vxor_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vxor_vv_u64m2_tum(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vxor_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vxor_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vxor_vx_u64m2_tum(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vxor_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vxor_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vxor_vv_u64m4_tum(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vxor_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vxor_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vxor_vx_u64m4_tum(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vxor_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vxor_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vxor_vv_u64m8_tum(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vxor_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vxor_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vxor_vx_u64m8_tum(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vxor_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vxor_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vxor_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl); } 
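/* A minimal illustrative sketch, not part of the generated suite: the _tumu
 * suffix on the tests around this point selects the tail-undisturbed,
 * mask-undisturbed policy. Lanes at or past vl and lanes whose bit in vm is
 * clear both keep the value they already had in vd; only active lanes are
 * written. The wrapper name below is hypothetical and assumes
 * <riscv_vector.h> is in scope, as in the generated files. */
vint8mf8_t xor_scalar_tumu_sketch(vbool64_t vm, vint8mf8_t vd,
                                  vint8mf8_t vs2, int8_t rs1, size_t vl) {
  /* For i in [0, vl): if vm[i] is set, result[i] = vs2[i] ^ rs1; otherwise
   * result[i] = vd[i]. Tail lanes (i >= vl) likewise stay vd[i]. */
  return __riscv_vxor_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl);
}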
-vint8mf8_t test_vxor_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vxor_vx_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8mf8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vxor_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vxor_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vxor_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vxor_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vxor_vx_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8mf4_tumu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vxor_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vxor_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vxor_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vxor_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vxor_vx_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8mf2_tumu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vxor_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vxor_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vxor_vv_i8m1_tumu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vxor_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vxor_vx_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8m1_tumu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vxor_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vxor_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vxor_vv_i8m2_tumu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vxor_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vxor_vx_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8m2_tumu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vxor_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vxor_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vxor_vv_i8m4_tumu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vxor_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vxor_vx_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8m4_tumu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vxor_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vxor_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vxor_vv_i8m8_tumu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vxor_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vxor_vx_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8m8_tumu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vxor_vv_i16mf4_tumu(vbool64_t vm, 
vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vxor_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vxor_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl); } -vint16mf4_t test_vxor_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vxor_vx_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16mf4_tumu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vxor_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vxor_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vxor_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vxor_vx_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16mf2_tumu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vxor_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vxor_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vxor_vv_i16m1_tumu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vxor_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vxor_vx_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16m1_tumu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vxor_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vxor_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vxor_vv_i16m2_tumu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vxor_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vxor_vx_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16m2_tumu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vxor_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vxor_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vxor_vv_i16m4_tumu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vxor_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vxor_vx_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16m4_tumu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vxor_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vxor_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vxor_vv_i16m8_tumu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vxor_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vxor_vx_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16m8_tumu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vxor_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vxor_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { 
return __riscv_vxor_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vxor_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vxor_vx_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32mf2_tumu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vxor_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vxor_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vxor_vv_i32m1_tumu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vxor_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vxor_vx_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32m1_tumu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vxor_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vxor_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vxor_vv_i32m2_tumu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vxor_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vxor_vx_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32m2_tumu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vxor_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vxor_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vxor_vv_i32m4_tumu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vxor_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vxor_vx_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32m4_tumu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vxor_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vxor_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vxor_vv_i32m8_tumu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vxor_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vxor_vx_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32m8_tumu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vxor_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vxor_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vxor_vv_i64m1_tumu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vxor_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vxor_vx_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vxor_vx_i64m1_tumu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vxor_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vxor_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vxor_vv_i64m2_tumu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vxor_vx_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vxor_vx_i64m2_tumu(vbool32_t vm, 
vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vxor_vx_i64m2_tumu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vxor_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vxor_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return __riscv_vxor_vv_i64m4_tumu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vxor_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vxor_vx_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vxor_vx_i64m4_tumu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vxor_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vxor_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vxor_vv_i64m8_tumu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vxor_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vxor_vx_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vxor_vx_i64m8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vxor_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vxor_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { return __riscv_vxor_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vxor_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vxor_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vxor_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vxor_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { return __riscv_vxor_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vxor_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vxor_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vxor_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vxor_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vxor_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vxor_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vxor_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vxor_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vxor_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vxor_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vxor_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vxor_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, 
vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vxor_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vxor_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vxor_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vxor_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vxor_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vxor_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vxor_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vxor_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vxor_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vxor_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vxor_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vxor_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vxor_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vxor_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vxor_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vxor_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vxor_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vxor_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vxor_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vxor_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vxor_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vxor_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vxor_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vxor_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { return __riscv_vxor_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vxor_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vxor_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { return __riscv_vxor_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vxor_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vxor_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vxor_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vxor_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t 
vs1, + size_t vl) { return __riscv_vxor_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vxor_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vxor_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vxor_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vxor_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { return __riscv_vxor_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vxor_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vxor_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vxor_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vxor_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { return __riscv_vxor_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vxor_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vxor_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vxor_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vxor_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vxor_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { return __riscv_vxor_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vxor_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vxor_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { return __riscv_vxor_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vxor_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vxor_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vxor_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vxor_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { return __riscv_vxor_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vxor_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vxor_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vxor_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vxor_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { return __riscv_vxor_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); } -vuint32m4_t 
test_vxor_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vxor_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vxor_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vxor_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { return __riscv_vxor_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vxor_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vxor_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vxor_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vxor_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { return __riscv_vxor_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vxor_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vxor_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { return __riscv_vxor_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vxor_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vxor_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { return __riscv_vxor_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vxor_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vxor_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { return __riscv_vxor_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vxor_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vxor_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { return __riscv_vxor_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vxor_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vxor_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { return __riscv_vxor_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vxor_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vxor_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { return __riscv_vxor_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vxor_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vxor_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { return __riscv_vxor_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); } -vint8mf8_t test_vxor_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) { +vint8mf8_t test_vxor_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, + vint8mf8_t vs1, size_t vl) { return __riscv_vxor_vv_i8mf8_mu(vm, vd, vs2, vs1, vl); } -vint8mf8_t test_vxor_vx_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2, int8_t rs1, size_t vl) { +vint8mf8_t test_vxor_vx_i8mf8_mu(vbool64_t 
vm, vint8mf8_t vd, vint8mf8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8mf8_mu(vm, vd, vs2, rs1, vl); } -vint8mf4_t test_vxor_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) { +vint8mf4_t test_vxor_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + vint8mf4_t vs1, size_t vl) { return __riscv_vxor_vv_i8mf4_mu(vm, vd, vs2, vs1, vl); } -vint8mf4_t test_vxor_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, int8_t rs1, size_t vl) { +vint8mf4_t test_vxor_vx_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8mf4_mu(vm, vd, vs2, rs1, vl); } -vint8mf2_t test_vxor_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) { +vint8mf2_t test_vxor_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + vint8mf2_t vs1, size_t vl) { return __riscv_vxor_vv_i8mf2_mu(vm, vd, vs2, vs1, vl); } -vint8mf2_t test_vxor_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, int8_t rs1, size_t vl) { +vint8mf2_t test_vxor_vx_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8mf2_mu(vm, vd, vs2, rs1, vl); } -vint8m1_t test_vxor_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1, size_t vl) { +vint8m1_t test_vxor_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + vint8m1_t vs1, size_t vl) { return __riscv_vxor_vv_i8m1_mu(vm, vd, vs2, vs1, vl); } -vint8m1_t test_vxor_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, int8_t rs1, size_t vl) { +vint8m1_t test_vxor_vx_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8m1_mu(vm, vd, vs2, rs1, vl); } -vint8m2_t test_vxor_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1, size_t vl) { +vint8m2_t test_vxor_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + vint8m2_t vs1, size_t vl) { return __riscv_vxor_vv_i8m2_mu(vm, vd, vs2, vs1, vl); } -vint8m2_t test_vxor_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, int8_t rs1, size_t vl) { +vint8m2_t test_vxor_vx_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8m2_mu(vm, vd, vs2, rs1, vl); } -vint8m4_t test_vxor_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1, size_t vl) { +vint8m4_t test_vxor_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + vint8m4_t vs1, size_t vl) { return __riscv_vxor_vv_i8m4_mu(vm, vd, vs2, vs1, vl); } -vint8m4_t test_vxor_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, int8_t rs1, size_t vl) { +vint8m4_t test_vxor_vx_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8m4_mu(vm, vd, vs2, rs1, vl); } -vint8m8_t test_vxor_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1, size_t vl) { +vint8m8_t test_vxor_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + vint8m8_t vs1, size_t vl) { return __riscv_vxor_vv_i8m8_mu(vm, vd, vs2, vs1, vl); } -vint8m8_t test_vxor_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, int8_t rs1, size_t vl) { +vint8m8_t test_vxor_vx_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2, + int8_t rs1, size_t vl) { return __riscv_vxor_vx_i8m8_mu(vm, vd, vs2, rs1, vl); } -vint16mf4_t test_vxor_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) { +vint16mf4_t test_vxor_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, vint16mf4_t vs1, + size_t vl) { return __riscv_vxor_vv_i16mf4_mu(vm, vd, vs2, 
vs1, vl); } -vint16mf4_t test_vxor_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, vint16mf4_t vs2, int16_t rs1, size_t vl) { +vint16mf4_t test_vxor_vx_i16mf4_mu(vbool64_t vm, vint16mf4_t vd, + vint16mf4_t vs2, int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16mf4_mu(vm, vd, vs2, rs1, vl); } -vint16mf2_t test_vxor_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) { +vint16mf2_t test_vxor_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, vint16mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_i16mf2_mu(vm, vd, vs2, vs1, vl); } -vint16mf2_t test_vxor_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, vint16mf2_t vs2, int16_t rs1, size_t vl) { +vint16mf2_t test_vxor_vx_i16mf2_mu(vbool32_t vm, vint16mf2_t vd, + vint16mf2_t vs2, int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16mf2_mu(vm, vd, vs2, rs1, vl); } -vint16m1_t test_vxor_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, vint16m1_t vs1, size_t vl) { +vint16m1_t test_vxor_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + vint16m1_t vs1, size_t vl) { return __riscv_vxor_vv_i16m1_mu(vm, vd, vs2, vs1, vl); } -vint16m1_t test_vxor_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, int16_t rs1, size_t vl) { +vint16m1_t test_vxor_vx_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2, + int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16m1_mu(vm, vd, vs2, rs1, vl); } -vint16m2_t test_vxor_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, vint16m2_t vs1, size_t vl) { +vint16m2_t test_vxor_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + vint16m2_t vs1, size_t vl) { return __riscv_vxor_vv_i16m2_mu(vm, vd, vs2, vs1, vl); } -vint16m2_t test_vxor_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, int16_t rs1, size_t vl) { +vint16m2_t test_vxor_vx_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2, + int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16m2_mu(vm, vd, vs2, rs1, vl); } -vint16m4_t test_vxor_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, vint16m4_t vs1, size_t vl) { +vint16m4_t test_vxor_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + vint16m4_t vs1, size_t vl) { return __riscv_vxor_vv_i16m4_mu(vm, vd, vs2, vs1, vl); } -vint16m4_t test_vxor_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, int16_t rs1, size_t vl) { +vint16m4_t test_vxor_vx_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2, + int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16m4_mu(vm, vd, vs2, rs1, vl); } -vint16m8_t test_vxor_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, vint16m8_t vs1, size_t vl) { +vint16m8_t test_vxor_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + vint16m8_t vs1, size_t vl) { return __riscv_vxor_vv_i16m8_mu(vm, vd, vs2, vs1, vl); } -vint16m8_t test_vxor_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, int16_t rs1, size_t vl) { +vint16m8_t test_vxor_vx_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2, + int16_t rs1, size_t vl) { return __riscv_vxor_vx_i16m8_mu(vm, vd, vs2, rs1, vl); } -vint32mf2_t test_vxor_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) { +vint32mf2_t test_vxor_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, vint32mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_i32mf2_mu(vm, vd, vs2, vs1, vl); } -vint32mf2_t test_vxor_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, vint32mf2_t vs2, int32_t rs1, size_t vl) { +vint32mf2_t test_vxor_vx_i32mf2_mu(vbool64_t vm, vint32mf2_t vd, + vint32mf2_t vs2, int32_t rs1, size_t vl) { return 
__riscv_vxor_vx_i32mf2_mu(vm, vd, vs2, rs1, vl); } -vint32m1_t test_vxor_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1, size_t vl) { +vint32m1_t test_vxor_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + vint32m1_t vs1, size_t vl) { return __riscv_vxor_vv_i32m1_mu(vm, vd, vs2, vs1, vl); } -vint32m1_t test_vxor_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, int32_t rs1, size_t vl) { +vint32m1_t test_vxor_vx_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2, + int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32m1_mu(vm, vd, vs2, rs1, vl); } -vint32m2_t test_vxor_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, vint32m2_t vs1, size_t vl) { +vint32m2_t test_vxor_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + vint32m2_t vs1, size_t vl) { return __riscv_vxor_vv_i32m2_mu(vm, vd, vs2, vs1, vl); } -vint32m2_t test_vxor_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, int32_t rs1, size_t vl) { +vint32m2_t test_vxor_vx_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2, + int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32m2_mu(vm, vd, vs2, rs1, vl); } -vint32m4_t test_vxor_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, vint32m4_t vs1, size_t vl) { +vint32m4_t test_vxor_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + vint32m4_t vs1, size_t vl) { return __riscv_vxor_vv_i32m4_mu(vm, vd, vs2, vs1, vl); } -vint32m4_t test_vxor_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, int32_t rs1, size_t vl) { +vint32m4_t test_vxor_vx_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2, + int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32m4_mu(vm, vd, vs2, rs1, vl); } -vint32m8_t test_vxor_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, vint32m8_t vs1, size_t vl) { +vint32m8_t test_vxor_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + vint32m8_t vs1, size_t vl) { return __riscv_vxor_vv_i32m8_mu(vm, vd, vs2, vs1, vl); } -vint32m8_t test_vxor_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, int32_t rs1, size_t vl) { +vint32m8_t test_vxor_vx_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2, + int32_t rs1, size_t vl) { return __riscv_vxor_vx_i32m8_mu(vm, vd, vs2, rs1, vl); } -vint64m1_t test_vxor_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, vint64m1_t vs1, size_t vl) { +vint64m1_t test_vxor_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + vint64m1_t vs1, size_t vl) { return __riscv_vxor_vv_i64m1_mu(vm, vd, vs2, vs1, vl); } -vint64m1_t test_vxor_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, int64_t rs1, size_t vl) { +vint64m1_t test_vxor_vx_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2, + int64_t rs1, size_t vl) { return __riscv_vxor_vx_i64m1_mu(vm, vd, vs2, rs1, vl); } -vint64m2_t test_vxor_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, vint64m2_t vs1, size_t vl) { +vint64m2_t test_vxor_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + vint64m2_t vs1, size_t vl) { return __riscv_vxor_vv_i64m2_mu(vm, vd, vs2, vs1, vl); } -vint64m2_t test_vxor_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, int64_t rs1, size_t vl) { +vint64m2_t test_vxor_vx_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2, + int64_t rs1, size_t vl) { return __riscv_vxor_vx_i64m2_mu(vm, vd, vs2, rs1, vl); } -vint64m4_t test_vxor_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, vint64m4_t vs1, size_t vl) { +vint64m4_t test_vxor_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + vint64m4_t vs1, size_t vl) { return 
__riscv_vxor_vv_i64m4_mu(vm, vd, vs2, vs1, vl); } -vint64m4_t test_vxor_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, int64_t rs1, size_t vl) { +vint64m4_t test_vxor_vx_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2, + int64_t rs1, size_t vl) { return __riscv_vxor_vx_i64m4_mu(vm, vd, vs2, rs1, vl); } -vint64m8_t test_vxor_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, vint64m8_t vs1, size_t vl) { +vint64m8_t test_vxor_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + vint64m8_t vs1, size_t vl) { return __riscv_vxor_vv_i64m8_mu(vm, vd, vs2, vs1, vl); } -vint64m8_t test_vxor_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, int64_t rs1, size_t vl) { +vint64m8_t test_vxor_vx_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2, + int64_t rs1, size_t vl) { return __riscv_vxor_vx_i64m8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf8_t test_vxor_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { +vuint8mf8_t test_vxor_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { return __riscv_vxor_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); } -vuint8mf8_t test_vxor_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { +vuint8mf8_t test_vxor_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); } -vuint8mf4_t test_vxor_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { +vuint8mf4_t test_vxor_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { return __riscv_vxor_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); } -vuint8mf4_t test_vxor_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { +vuint8mf4_t test_vxor_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); } -vuint8mf2_t test_vxor_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { +vuint8mf2_t test_vxor_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { return __riscv_vxor_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); } -vuint8mf2_t test_vxor_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { +vuint8mf2_t test_vxor_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); } -vuint8m1_t test_vxor_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { +vuint8m1_t test_vxor_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { return __riscv_vxor_vv_u8m1_mu(vm, vd, vs2, vs1, vl); } -vuint8m1_t test_vxor_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { +vuint8m1_t test_vxor_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8m1_mu(vm, vd, vs2, rs1, vl); } -vuint8m2_t test_vxor_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { +vuint8m2_t test_vxor_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { return __riscv_vxor_vv_u8m2_mu(vm, vd, vs2, vs1, vl); } -vuint8m2_t test_vxor_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { +vuint8m2_t test_vxor_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { return 
__riscv_vxor_vx_u8m2_mu(vm, vd, vs2, rs1, vl); } -vuint8m4_t test_vxor_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { +vuint8m4_t test_vxor_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { return __riscv_vxor_vv_u8m4_mu(vm, vd, vs2, vs1, vl); } -vuint8m4_t test_vxor_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { +vuint8m4_t test_vxor_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8m4_mu(vm, vd, vs2, rs1, vl); } -vuint8m8_t test_vxor_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { +vuint8m8_t test_vxor_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { return __riscv_vxor_vv_u8m8_mu(vm, vd, vs2, vs1, vl); } -vuint8m8_t test_vxor_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { +vuint8m8_t test_vxor_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { return __riscv_vxor_vx_u8m8_mu(vm, vd, vs2, rs1, vl); } -vuint16mf4_t test_vxor_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { +vuint16mf4_t test_vxor_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { return __riscv_vxor_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); } -vuint16mf4_t test_vxor_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { +vuint16mf4_t test_vxor_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); } -vuint16mf2_t test_vxor_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { +vuint16mf2_t test_vxor_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); } -vuint16mf2_t test_vxor_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { +vuint16mf2_t test_vxor_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); } -vuint16m1_t test_vxor_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { +vuint16m1_t test_vxor_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { return __riscv_vxor_vv_u16m1_mu(vm, vd, vs2, vs1, vl); } -vuint16m1_t test_vxor_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { +vuint16m1_t test_vxor_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16m1_mu(vm, vd, vs2, rs1, vl); } -vuint16m2_t test_vxor_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { +vuint16m2_t test_vxor_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { return __riscv_vxor_vv_u16m2_mu(vm, vd, vs2, vs1, vl); } -vuint16m2_t test_vxor_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { +vuint16m2_t test_vxor_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16m2_mu(vm, vd, vs2, rs1, vl); } -vuint16m4_t test_vxor_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { +vuint16m4_t test_vxor_vv_u16m4_mu(vbool4_t 
vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { return __riscv_vxor_vv_u16m4_mu(vm, vd, vs2, vs1, vl); } -vuint16m4_t test_vxor_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { +vuint16m4_t test_vxor_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16m4_mu(vm, vd, vs2, rs1, vl); } -vuint16m8_t test_vxor_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { +vuint16m8_t test_vxor_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { return __riscv_vxor_vv_u16m8_mu(vm, vd, vs2, vs1, vl); } -vuint16m8_t test_vxor_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { +vuint16m8_t test_vxor_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { return __riscv_vxor_vx_u16m8_mu(vm, vd, vs2, rs1, vl); } -vuint32mf2_t test_vxor_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { +vuint32mf2_t test_vxor_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { return __riscv_vxor_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); } -vuint32mf2_t test_vxor_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { +vuint32mf2_t test_vxor_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); } -vuint32m1_t test_vxor_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { +vuint32m1_t test_vxor_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { return __riscv_vxor_vv_u32m1_mu(vm, vd, vs2, vs1, vl); } -vuint32m1_t test_vxor_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { +vuint32m1_t test_vxor_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32m1_mu(vm, vd, vs2, rs1, vl); } -vuint32m2_t test_vxor_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { +vuint32m2_t test_vxor_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { return __riscv_vxor_vv_u32m2_mu(vm, vd, vs2, vs1, vl); } -vuint32m2_t test_vxor_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { +vuint32m2_t test_vxor_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32m2_mu(vm, vd, vs2, rs1, vl); } -vuint32m4_t test_vxor_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { +vuint32m4_t test_vxor_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { return __riscv_vxor_vv_u32m4_mu(vm, vd, vs2, vs1, vl); } -vuint32m4_t test_vxor_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { +vuint32m4_t test_vxor_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32m4_mu(vm, vd, vs2, rs1, vl); } -vuint32m8_t test_vxor_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { +vuint32m8_t test_vxor_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { return __riscv_vxor_vv_u32m8_mu(vm, vd, vs2, vs1, vl); } -vuint32m8_t test_vxor_vx_u32m8_mu(vbool4_t vm, 
vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { +vuint32m8_t test_vxor_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { return __riscv_vxor_vx_u32m8_mu(vm, vd, vs2, rs1, vl); } -vuint64m1_t test_vxor_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { +vuint64m1_t test_vxor_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { return __riscv_vxor_vv_u64m1_mu(vm, vd, vs2, vs1, vl); } -vuint64m1_t test_vxor_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { +vuint64m1_t test_vxor_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vxor_vx_u64m1_mu(vm, vd, vs2, rs1, vl); } -vuint64m2_t test_vxor_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { +vuint64m2_t test_vxor_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { return __riscv_vxor_vv_u64m2_mu(vm, vd, vs2, vs1, vl); } -vuint64m2_t test_vxor_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { +vuint64m2_t test_vxor_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vxor_vx_u64m2_mu(vm, vd, vs2, rs1, vl); } -vuint64m4_t test_vxor_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { +vuint64m4_t test_vxor_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { return __riscv_vxor_vv_u64m4_mu(vm, vd, vs2, vs1, vl); } -vuint64m4_t test_vxor_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { +vuint64m4_t test_vxor_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vxor_vx_u64m4_mu(vm, vd, vs2, rs1, vl); } -vuint64m8_t test_vxor_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { +vuint64m8_t test_vxor_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { return __riscv_vxor_vv_u64m8_mu(vm, vd, vs2, vs1, vl); } -vuint64m8_t test_vxor_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { +vuint64m8_t test_vxor_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { return __riscv_vxor_vx_u64m8_mu(vm, vd, vs2, rs1, vl); } diff --git a/auto-generated/policy_funcs/llvm-api-tests/vzext_vf2.c b/auto-generated/policy_funcs/llvm-api-tests/vzext_vf2.c index d323476dd..f7d9e6e7f 100644 --- a/auto-generated/policy_funcs/llvm-api-tests/vzext_vf2.c +++ b/auto-generated/policy_funcs/llvm-api-tests/vzext_vf2.c @@ -5,15 +5,18 @@ #include <riscv_vector.h> -vuint16mf4_t test_vzext_vf2_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t test_vzext_vf2_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + size_t vl) { return __riscv_vzext_vf2_u16mf4_tu(vd, vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vzext_vf2_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + size_t vl) { return __riscv_vzext_vf2_u16mf2_tu(vd, vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { +vuint16m1_t test_vzext_vf2_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + size_t vl) { return __riscv_vzext_vf2_u16m1_tu(vd, vs2, vl); } @@ -29,218 +32,272 @@ vuint16m8_t test_vzext_vf2_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { return
__riscv_vzext_vf2_u16m8_tu(vd, vs2, vl); } -vuint32mf2_t test_vzext_vf2_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vzext_vf2_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + size_t vl) { return __riscv_vzext_vf2_u32mf2_tu(vd, vs2, vl); } -vuint32m1_t test_vzext_vf2_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vzext_vf2_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + size_t vl) { return __riscv_vzext_vf2_u32m1_tu(vd, vs2, vl); } -vuint32m2_t test_vzext_vf2_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vzext_vf2_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + size_t vl) { return __riscv_vzext_vf2_u32m2_tu(vd, vs2, vl); } -vuint32m4_t test_vzext_vf2_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vzext_vf2_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + size_t vl) { return __riscv_vzext_vf2_u32m4_tu(vd, vs2, vl); } -vuint32m8_t test_vzext_vf2_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vzext_vf2_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + size_t vl) { return __riscv_vzext_vf2_u32m8_tu(vd, vs2, vl); } -vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t test_vzext_vf2_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + size_t vl) { return __riscv_vzext_vf2_u64m1_tu(vd, vs2, vl); } -vuint64m2_t test_vzext_vf2_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vzext_vf2_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + size_t vl) { return __riscv_vzext_vf2_u64m2_tu(vd, vs2, vl); } -vuint64m4_t test_vzext_vf2_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vzext_vf2_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + size_t vl) { return __riscv_vzext_vf2_u64m4_tu(vd, vs2, vl); } -vuint64m8_t test_vzext_vf2_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vzext_vf2_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + size_t vl) { return __riscv_vzext_vf2_u64m8_tu(vd, vs2, vl); } -vuint16mf4_t test_vzext_vf2_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t test_vzext_vf2_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { return __riscv_vzext_vf2_u16mf4_tum(vm, vd, vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vzext_vf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { return __riscv_vzext_vf2_u16mf2_tum(vm, vd, vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { +vuint16m1_t test_vzext_vf2_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t vl) { return __riscv_vzext_vf2_u16m1_tum(vm, vd, vs2, vl); } -vuint16m2_t test_vzext_vf2_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { +vuint16m2_t test_vzext_vf2_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t vl) { return __riscv_vzext_vf2_u16m2_tum(vm, vd, vs2, vl); } -vuint16m4_t test_vzext_vf2_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vzext_vf2_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t vl) { return __riscv_vzext_vf2_u16m4_tum(vm, vd, vs2, vl); } -vuint16m8_t test_vzext_vf2_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) { +vuint16m8_t test_vzext_vf2_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t vl) { return __riscv_vzext_vf2_u16m8_tum(vm, vd, vs2, vl); } -vuint32mf2_t 
test_vzext_vf2_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) { +vuint32mf2_t test_vzext_vf2_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t vl) { return __riscv_vzext_vf2_u32mf2_tum(vm, vd, vs2, vl); } -vuint32m1_t test_vzext_vf2_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) { +vuint32m1_t test_vzext_vf2_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t vl) { return __riscv_vzext_vf2_u32m1_tum(vm, vd, vs2, vl); } -vuint32m2_t test_vzext_vf2_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) { +vuint32m2_t test_vzext_vf2_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t vl) { return __riscv_vzext_vf2_u32m2_tum(vm, vd, vs2, vl); } -vuint32m4_t test_vzext_vf2_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) { +vuint32m4_t test_vzext_vf2_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t vl) { return __riscv_vzext_vf2_u32m4_tum(vm, vd, vs2, vl); } -vuint32m8_t test_vzext_vf2_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) { +vuint32m8_t test_vzext_vf2_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t vl) { return __riscv_vzext_vf2_u32m8_tum(vm, vd, vs2, vl); } -vuint64m1_t test_vzext_vf2_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) { +vuint64m1_t test_vzext_vf2_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t vl) { return __riscv_vzext_vf2_u64m1_tum(vm, vd, vs2, vl); } -vuint64m2_t test_vzext_vf2_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) { +vuint64m2_t test_vzext_vf2_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t vl) { return __riscv_vzext_vf2_u64m2_tum(vm, vd, vs2, vl); } -vuint64m4_t test_vzext_vf2_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) { +vuint64m4_t test_vzext_vf2_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t vl) { return __riscv_vzext_vf2_u64m4_tum(vm, vd, vs2, vl); } -vuint64m8_t test_vzext_vf2_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) { +vuint64m8_t test_vzext_vf2_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t vl) { return __riscv_vzext_vf2_u64m8_tum(vm, vd, vs2, vl); } -vuint16mf4_t test_vzext_vf2_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) { +vuint16mf4_t test_vzext_vf2_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { return __riscv_vzext_vf2_u16mf4_tumu(vm, vd, vs2, vl); } -vuint16mf2_t test_vzext_vf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) { +vuint16mf2_t test_vzext_vf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { return __riscv_vzext_vf2_u16mf2_tumu(vm, vd, vs2, vl); } -vuint16m1_t test_vzext_vf2_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) { +vuint16m1_t test_vzext_vf2_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t vl) { return __riscv_vzext_vf2_u16m1_tumu(vm, vd, vs2, vl); } -vuint16m2_t test_vzext_vf2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) { +vuint16m2_t test_vzext_vf2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t vl) { return __riscv_vzext_vf2_u16m2_tumu(vm, vd, vs2, vl); } -vuint16m4_t test_vzext_vf2_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) { +vuint16m4_t test_vzext_vf2_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t vl) { return __riscv_vzext_vf2_u16m4_tumu(vm, vd, vs2, vl); 
 }

-vuint16m8_t test_vzext_vf2_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
+vuint16m8_t test_vzext_vf2_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                      vuint8m4_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u16m8_tumu(vm, vd, vs2, vl);
 }

-vuint32mf2_t test_vzext_vf2_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
+vuint32mf2_t test_vzext_vf2_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint16mf4_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u32mf2_tumu(vm, vd, vs2, vl);
 }

-vuint32m1_t test_vzext_vf2_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) {
+vuint32m1_t test_vzext_vf2_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint16mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u32m1_tumu(vm, vd, vs2, vl);
 }

-vuint32m2_t test_vzext_vf2_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
+vuint32m2_t test_vzext_vf2_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint16m1_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u32m2_tumu(vm, vd, vs2, vl);
 }

-vuint32m4_t test_vzext_vf2_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) {
+vuint32m4_t test_vzext_vf2_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint16m2_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u32m4_tumu(vm, vd, vs2, vl);
 }

-vuint32m8_t test_vzext_vf2_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) {
+vuint32m8_t test_vzext_vf2_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                      vuint16m4_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u32m8_tumu(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) {
+vuint64m1_t test_vzext_vf2_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint32mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u64m1_tumu(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf2_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
+vuint64m2_t test_vzext_vf2_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint32m1_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u64m2_tumu(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf2_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
+vuint64m4_t test_vzext_vf2_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint32m2_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u64m4_tumu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf2_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
+vuint64m8_t test_vzext_vf2_u64m8_tumu(vbool8_t vm, vuint64m8_t vd,
+                                      vuint32m4_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u64m8_tumu(vm, vd, vs2, vl);
 }

-vuint16mf4_t test_vzext_vf2_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
+vuint16mf4_t test_vzext_vf2_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint8mf8_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u16mf4_mu(vm, vd, vs2, vl);
 }

-vuint16mf2_t test_vzext_vf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
+vuint16mf2_t test_vzext_vf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint8mf4_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u16mf2_mu(vm, vd, vs2, vl);
 }

-vuint16m1_t test_vzext_vf2_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
+vuint16m1_t test_vzext_vf2_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                    vuint8mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u16m1_mu(vm, vd, vs2, vl);
 }

-vuint16m2_t test_vzext_vf2_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t vl) {
+vuint16m2_t test_vzext_vf2_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf2_u16m2_mu(vm, vd, vs2, vl);
 }

-vuint16m4_t test_vzext_vf2_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t vl) {
+vuint16m4_t test_vzext_vf2_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf2_u16m4_mu(vm, vd, vs2, vl);
 }

-vuint16m8_t test_vzext_vf2_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
+vuint16m8_t test_vzext_vf2_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf2_u16m8_mu(vm, vd, vs2, vl);
 }

-vuint32mf2_t test_vzext_vf2_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
+vuint32mf2_t test_vzext_vf2_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint16mf4_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u32mf2_mu(vm, vd, vs2, vl);
 }

-vuint32m1_t test_vzext_vf2_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) {
+vuint32m1_t test_vzext_vf2_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    vuint16mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u32m1_mu(vm, vd, vs2, vl);
 }

-vuint32m2_t test_vzext_vf2_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
+vuint32m2_t test_vzext_vf2_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    vuint16m1_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u32m2_mu(vm, vd, vs2, vl);
 }

-vuint32m4_t test_vzext_vf2_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t vl) {
+vuint32m4_t test_vzext_vf2_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+                                    vuint16m2_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u32m4_mu(vm, vd, vs2, vl);
 }

-vuint32m8_t test_vzext_vf2_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t vl) {
+vuint32m8_t test_vzext_vf2_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+                                    vuint16m4_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u32m8_mu(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vzext_vf2_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) {
+vuint64m1_t test_vzext_vf2_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    vuint32mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u64m1_mu(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf2_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
+vuint64m2_t test_vzext_vf2_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    vuint32m1_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u64m2_mu(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf2_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
+vuint64m4_t test_vzext_vf2_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    vuint32m2_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u64m4_mu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf2_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
+vuint64m8_t test_vzext_vf2_u64m8_mu(vbool8_t vm, vuint64m8_t vd,
+                                    vuint32m4_t vs2, size_t vl) {
   return __riscv_vzext_vf2_u64m8_mu(vm, vd, vs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vzext_vf4.c b/auto-generated/policy_funcs/llvm-api-tests/vzext_vf4.c
index e70957a4f..98774a713 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vzext_vf4.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vzext_vf4.c
@@ -5,15 +5,18 @@
 #include <riscv_vector.h>

-vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t vd, vuint8mf8_t vs2, size_t vl) {
+vuint32mf2_t test_vzext_vf4_u32mf2_tu(vuint32mf2_t vd, vuint8mf8_t vs2,
+                                      size_t vl) {
   return __riscv_vzext_vf4_u32mf2_tu(vd, vs2, vl);
 }

-vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t vd, vuint8mf4_t vs2, size_t vl) {
+vuint32m1_t test_vzext_vf4_u32m1_tu(vuint32m1_t vd, vuint8mf4_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf4_u32m1_tu(vd, vs2, vl);
 }

-vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t vd, vuint8mf2_t vs2, size_t vl) {
+vuint32m2_t test_vzext_vf4_u32m2_tu(vuint32m2_t vd, vuint8mf2_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf4_u32m2_tu(vd, vs2, vl);
 }

@@ -25,126 +28,157 @@ vuint32m8_t test_vzext_vf4_u32m8_tu(vuint32m8_t vd, vuint8m2_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32m8_tu(vd, vs2, vl);
 }

-vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t vd, vuint16mf4_t vs2, size_t vl) {
+vuint64m1_t test_vzext_vf4_u64m1_tu(vuint64m1_t vd, vuint16mf4_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf4_u64m1_tu(vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t vd, vuint16mf2_t vs2, size_t vl) {
+vuint64m2_t test_vzext_vf4_u64m2_tu(vuint64m2_t vd, vuint16mf2_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf4_u64m2_tu(vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t vd, vuint16m1_t vs2, size_t vl) {
+vuint64m4_t test_vzext_vf4_u64m4_tu(vuint64m4_t vd, vuint16m1_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf4_u64m4_tu(vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t vd, vuint16m2_t vs2, size_t vl) {
+vuint64m8_t test_vzext_vf4_u64m8_tu(vuint64m8_t vd, vuint16m2_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf4_u64m8_tu(vd, vs2, vl);
 }

-vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint8mf8_t vs2, size_t vl) {
+vuint32mf2_t test_vzext_vf4_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint8mf8_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32mf2_tum(vm, vd, vs2, vl);
 }

-vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint8mf4_t vs2, size_t vl) {
+vuint32m1_t test_vzext_vf4_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vuint8mf4_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32m1_tum(vm, vd, vs2, vl);
 }

-vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint8mf2_t vs2, size_t vl) {
+vuint32m2_t test_vzext_vf4_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vuint8mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32m2_tum(vm, vd, vs2, vl);
 }

-vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint8m1_t vs2, size_t vl) {
+vuint32m4_t test_vzext_vf4_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vuint8m1_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32m4_tum(vm, vd, vs2, vl);
 }

-vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint8m2_t vs2, size_t vl) {
+vuint32m8_t test_vzext_vf4_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                     vuint8m2_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32m8_tum(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint16mf4_t vs2, size_t vl) {
+vuint64m1_t test_vzext_vf4_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                     vuint16mf4_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u64m1_tum(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint16mf2_t vs2, size_t vl) {
+vuint64m2_t test_vzext_vf4_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                     vuint16mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u64m2_tum(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint16m1_t vs2, size_t vl) {
+vuint64m4_t test_vzext_vf4_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                     vuint16m1_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u64m4_tum(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint16m2_t vs2, size_t vl) {
+vuint64m8_t test_vzext_vf4_u64m8_tum(vbool8_t vm, vuint64m8_t vd,
+                                     vuint16m2_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u64m8_tum(vm, vd, vs2, vl);
 }

-vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint8mf8_t vs2, size_t vl) {
+vuint32mf2_t test_vzext_vf4_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint8mf8_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32mf2_tumu(vm, vd, vs2, vl);
 }

-vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint8mf4_t vs2, size_t vl) {
+vuint32m1_t test_vzext_vf4_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint8mf4_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32m1_tumu(vm, vd, vs2, vl);
 }

-vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint8mf2_t vs2, size_t vl) {
+vuint32m2_t test_vzext_vf4_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint8mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32m2_tumu(vm, vd, vs2, vl);
 }

-vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint8m1_t vs2, size_t vl) {
+vuint32m4_t test_vzext_vf4_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint8m1_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32m4_tumu(vm, vd, vs2, vl);
 }

-vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint8m2_t vs2, size_t vl) {
+vuint32m8_t test_vzext_vf4_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                      vuint8m2_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32m8_tumu(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint16mf4_t vs2, size_t vl) {
+vuint64m1_t test_vzext_vf4_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint16mf4_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u64m1_tumu(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint16mf2_t vs2, size_t vl) {
+vuint64m2_t test_vzext_vf4_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint16mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u64m2_tumu(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint16m1_t vs2, size_t vl) {
+vuint64m4_t test_vzext_vf4_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint16m1_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u64m4_tumu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint16m2_t vs2, size_t vl) {
+vuint64m8_t test_vzext_vf4_u64m8_tumu(vbool8_t vm, vuint64m8_t vd,
+                                      vuint16m2_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u64m8_tumu(vm, vd, vs2, vl);
 }

-vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint8mf8_t vs2, size_t vl) {
+vuint32mf2_t test_vzext_vf4_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint8mf8_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32mf2_mu(vm, vd, vs2, vl);
 }

-vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint8mf4_t vs2, size_t vl) {
+vuint32m1_t test_vzext_vf4_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    vuint8mf4_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32m1_mu(vm, vd, vs2, vl);
 }

-vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint8mf2_t vs2, size_t vl) {
+vuint32m2_t test_vzext_vf4_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    vuint8mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u32m2_mu(vm, vd, vs2, vl);
 }

-vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint8m1_t vs2, size_t vl) {
+vuint32m4_t test_vzext_vf4_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint8m1_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf4_u32m4_mu(vm, vd, vs2, vl);
 }

-vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint8m2_t vs2, size_t vl) {
+vuint32m8_t test_vzext_vf4_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint8m2_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf4_u32m8_mu(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint16mf4_t vs2, size_t vl) {
+vuint64m1_t test_vzext_vf4_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    vuint16mf4_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u64m1_mu(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint16mf2_t vs2, size_t vl) {
+vuint64m2_t test_vzext_vf4_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    vuint16mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u64m2_mu(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint16m1_t vs2, size_t vl) {
+vuint64m4_t test_vzext_vf4_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    vuint16m1_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u64m4_mu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint16m2_t vs2, size_t vl) {
+vuint64m8_t test_vzext_vf4_u64m8_mu(vbool8_t vm, vuint64m8_t vd,
+                                    vuint16m2_t vs2, size_t vl) {
   return __riscv_vzext_vf4_u64m8_mu(vm, vd, vs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-api-tests/vzext_vf8.c b/auto-generated/policy_funcs/llvm-api-tests/vzext_vf8.c
index ad06de532..39fdbb2ac 100644
--- a/auto-generated/policy_funcs/llvm-api-tests/vzext_vf8.c
+++ b/auto-generated/policy_funcs/llvm-api-tests/vzext_vf8.c
@@ -5,15 +5,18 @@
 #include <riscv_vector.h>

-vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t vd, vuint8mf8_t vs2, size_t vl) {
+vuint64m1_t test_vzext_vf8_u64m1_tu(vuint64m1_t vd, vuint8mf8_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf8_u64m1_tu(vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t vd, vuint8mf4_t vs2, size_t vl) {
+vuint64m2_t test_vzext_vf8_u64m2_tu(vuint64m2_t vd, vuint8mf4_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf8_u64m2_tu(vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t vd, vuint8mf2_t vs2, size_t vl) {
+vuint64m4_t test_vzext_vf8_u64m4_tu(vuint64m4_t vd, vuint8mf2_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf8_u64m4_tu(vd, vs2, vl);
 }

@@ -21,50 +24,62 @@ vuint64m8_t test_vzext_vf8_u64m8_tu(vuint64m8_t vd, vuint8m1_t vs2, size_t vl) {
   return __riscv_vzext_vf8_u64m8_tu(vd, vs2, vl);
 }

-vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint8mf8_t vs2, size_t vl) {
+vuint64m1_t test_vzext_vf8_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                     vuint8mf8_t vs2, size_t vl) {
   return __riscv_vzext_vf8_u64m1_tum(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint8mf4_t vs2, size_t vl) {
+vuint64m2_t test_vzext_vf8_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                     vuint8mf4_t vs2, size_t vl) {
   return __riscv_vzext_vf8_u64m2_tum(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint8mf2_t vs2, size_t vl) {
+vuint64m4_t test_vzext_vf8_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                     vuint8mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf8_u64m4_tum(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint8m1_t vs2, size_t vl) {
+vuint64m8_t test_vzext_vf8_u64m8_tum(vbool8_t vm, vuint64m8_t vd,
+                                     vuint8m1_t vs2, size_t vl) {
   return __riscv_vzext_vf8_u64m8_tum(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint8mf8_t vs2, size_t vl) {
+vuint64m1_t test_vzext_vf8_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint8mf8_t vs2, size_t vl) {
   return __riscv_vzext_vf8_u64m1_tumu(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint8mf4_t vs2, size_t vl) {
+vuint64m2_t test_vzext_vf8_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint8mf4_t vs2, size_t vl) {
   return __riscv_vzext_vf8_u64m2_tumu(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint8mf2_t vs2, size_t vl) {
+vuint64m4_t test_vzext_vf8_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint8mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf8_u64m4_tumu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint8m1_t vs2, size_t vl) {
+vuint64m8_t test_vzext_vf8_u64m8_tumu(vbool8_t vm, vuint64m8_t vd,
+                                      vuint8m1_t vs2, size_t vl) {
   return __riscv_vzext_vf8_u64m8_tumu(vm, vd, vs2, vl);
 }

-vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint8mf8_t vs2, size_t vl) {
+vuint64m1_t test_vzext_vf8_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    vuint8mf8_t vs2, size_t vl) {
   return __riscv_vzext_vf8_u64m1_mu(vm, vd, vs2, vl);
 }

-vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint8mf4_t vs2, size_t vl) {
+vuint64m2_t test_vzext_vf8_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    vuint8mf4_t vs2, size_t vl) {
   return __riscv_vzext_vf8_u64m2_mu(vm, vd, vs2, vl);
 }

-vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint8mf2_t vs2, size_t vl) {
+vuint64m4_t test_vzext_vf8_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    vuint8mf2_t vs2, size_t vl) {
   return __riscv_vzext_vf8_u64m4_mu(vm, vd, vs2, vl);
 }

-vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint8m1_t vs2, size_t vl) {
+vuint64m8_t test_vzext_vf8_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint8m1_t vs2,
+                                    size_t vl) {
   return __riscv_vzext_vf8_u64m8_mu(vm, vd, vs2, vl);
 }
diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vcompress.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vcompress.c
index b6a4ef19a..aff9afd33 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vcompress.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vcompress.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfabs.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfabs.c
index 6ffbd7a63..9c5a9851d 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfabs.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfabs.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfadd.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfadd.c
index 886f0eda8..f09e8a69c 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfadd.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfadd.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfclass.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfclass.c
index 79128aea9..a104f6762 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfclass.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfclass.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfcvt.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfcvt.c
index 39640927d..c8dbd1931 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfcvt.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfcvt.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfcvt_rtz.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfcvt_rtz.c
index dc7e3e646..eba687636 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfcvt_rtz.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfcvt_rtz.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfdiv.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfdiv.c
index 1b0bd9d8c..b6b6e1ae5 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfdiv.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfdiv.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmacc.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmacc.c
index 718e8db82..1e40ba60f 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmacc.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmacc.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmadd.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmadd.c
index c154592a7..56eb76d7e 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmadd.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmadd.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmax.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmax.c
index 703490a77..36f423f35 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmax.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmax.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmerge.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmerge.c
index 9e31f3039..df748485a 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmerge.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmerge.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmin.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmin.c
index b9d268770..b9e39913e 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmin.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmin.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmsac.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmsac.c
index 7d3fdb4f3..c5c6da74c 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmsac.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmsac.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmsub.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmsub.c
index 427d12024..7461bea5c 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmsub.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmsub.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmul.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmul.c
index e6ad40115..1cf9e0274 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmul.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmul.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmv.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmv.c
index 8566da9fd..91038fd65 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfmv.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfmv.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfncvt.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfncvt.c
index 0dee6aa75..cf64ceeb0 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfncvt.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfncvt.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfncvt_rod.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfncvt_rod.c
index 8409807a9..0859315d2 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfncvt_rod.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfncvt_rod.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfncvt_rtz.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfncvt_rtz.c
index 689a3ac8d..78ca40d8f 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfncvt_rtz.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfncvt_rtz.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfneg.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfneg.c
index ff04891db..ccfec5c7f 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfneg.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfneg.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmacc.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmacc.c
index 503513ae1..749a7081c 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmacc.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmacc.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmadd.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmadd.c
index d53625403..8697c6575 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmadd.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmadd.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmsac.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmsac.c
index 7ca40def0..9d172fc67 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmsac.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmsac.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmsub.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmsub.c
index 96f2f0066..bccfd5966 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmsub.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfnmsub.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfrdiv.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfrdiv.c
index 850c399df..1a60fc3e9 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfrdiv.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfrdiv.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfrec7.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfrec7.c
index 7106612b1..fee19f6ab 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfrec7.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfrec7.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfredmax.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfredmax.c
index 303024300..0cc4a4040 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfredmax.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfredmax.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfredmin.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfredmin.c
index 699643c1a..0fc2fda1d 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfredmin.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfredmin.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfredosum.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfredosum.c
index a1dc3d992..d861e8ff7 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfredosum.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfredosum.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfredusum.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfredusum.c
index dd5a3b915..ae92f6c73 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfredusum.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfredusum.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfrsqrt7.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfrsqrt7.c
index c02e6426f..161f6fd71 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfrsqrt7.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfrsqrt7.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfrsub.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfrsub.c
index 374979b86..9e117f73b 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfrsub.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfrsub.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfsgnj.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfsgnj.c
index e4b54bf08..984f3990c 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfsgnj.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfsgnj.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfsgnjn.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfsgnjn.c
index 2d3264905..b6b8148c5 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfsgnjn.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfsgnjn.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfsgnjx.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfsgnjx.c
index 9cd3ff655..116a9f1eb 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfsgnjx.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfsgnjx.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfslide1down.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfslide1down.c
index 97fd8a8e3..a81895634 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfslide1down.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfslide1down.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfslide1up.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfslide1up.c
index 7bdd6f434..778d3905f 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfslide1up.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfslide1up.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfsqrt.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfsqrt.c
index a193deb01..0f7a8a365 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfsqrt.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfsqrt.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfsub.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfsub.c
index a435f598e..1c7293cdc 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfsub.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfsub.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwadd.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwadd.c
index a442510aa..e8e9fcd57 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwadd.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwadd.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwcvt.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwcvt.c
index 456e0a331..62719e787 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwcvt.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwcvt.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwcvt_rtz.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwcvt_rtz.c
index 2e01384b7..54903e543 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwcvt_rtz.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwcvt_rtz.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwmacc.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwmacc.c
index 1fa53ed3b..52811c1d9 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwmacc.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwmacc.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwmsac.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwmsac.c
index 35fe067a6..32bb460bf 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwmsac.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwmsac.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwmul.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwmul.c
index ad5b2ed2d..84ad7ec30 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwmul.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwmul.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwnmacc.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwnmacc.c
index dc2c53056..02d9e2231 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwnmacc.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwnmacc.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwnmsac.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwnmsac.c
index 26d4e3367..0c4713abb 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwnmsac.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwnmsac.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwredosum.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwredosum.c
index ff84c4aea..1ede7ddee 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwredosum.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwredosum.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwredusum.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwredusum.c
index d8f8d12c8..a196c15ed 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwredusum.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwredusum.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwsub.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwsub.c
index 476c1f0f7..f1b5720d2 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vfwsub.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vfwsub.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vle16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vle16.c
index bc58bf6a5..92f0d41c6 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vle16.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vle16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vle16ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vle16ff.c
index cc1088f82..4821933f4 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vle16ff.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vle16ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vle32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vle32.c
index b52d00c8a..fb9f4cec6 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vle32.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vle32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vle32ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vle32ff.c
index a5555ac8f..f0385e548 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vle32ff.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vle32ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vle64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vle64.c
index 3b5ad37ea..dd94c6cb5 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vle64.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vle64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vle64ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vle64ff.c
index 5336fa372..f6bf02407 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vle64ff.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vle64ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vle8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vle8.c
index 8f1f7d53d..7569c4bad 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vle8.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vle8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vle8ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vle8ff.c
index dd1ae5d76..4d4860688 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vle8ff.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vle8ff.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei16.c
index 68900a9f8..f754f4ebc 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei16.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei32.c
index df4b06c57..f9c53643e 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei32.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei64.c
index b46ebded4..f1165b1cf 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei64.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei8.c
index e485fea80..bd90325d9 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei8.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei16.c
index 9b31232d2..5d01d7a9c 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei16.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei32.c
index 885be317d..89d3ccfae 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei32.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei64.c
index 4326aff4a..a4849568a 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei64.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei64.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei8.c
index 2dd764779..9c1c879db 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei8.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg2ei8.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei16.c
index 419a9725a..8c1929e7a 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei16.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei16.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s

diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei32.c
index a3e875cfa..b599353de 100644
--- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei32.c
+++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei32.c
@@ -1,6 +1,6 @@
 // REQUIRES: riscv-registered-target
 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
-// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN: -target-feature +zvfh -disable-O0-optnone \
 // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
 // RUN: FileCheck --check-prefix=CHECK-RV64 %s
--check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei64.c index f3830edf5..83ae974a5 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei8.c index a35ba5133..a756b75f9 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg3ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei16.c index 3f028e3be..e9f55962c 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei32.c index d23de5d1d..49dc32d0f 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei64.c index 5e9bacab6..1ba0074d5 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei8.c index 
d4f465301..775d3ec0c 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg4ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei16.c index 177dd9a8f..b052379c6 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei32.c index 956127665..441ae232a 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei64.c index 469c280d8..8baccbb42 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei8.c index dd07162d3..94635b110 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg5ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei16.c index e5d97dc14..8837aa9db 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei16.c @@ -1,6 +1,6 @@ // 
REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei32.c index e3cfec40f..48f262498 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei64.c index 6e44c72ce..1d08dafd7 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei8.c index 7ba1fc686..861e32188 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg6ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei16.c index d796627e8..62daafe80 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei32.c index 9709aaa73..c91b23ae1 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: 
-target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei64.c index a31726995..e979d407e 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei8.c index bdf132c7e..8f82146b9 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg7ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei16.c index c45f19037..810b69125 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei32.c index cbe5def44..0c0eca097 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei64.c index 442a6821f..8e4eb6c80 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git 
a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei8.c index 5504d6dd3..e36736b07 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vloxseg8ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlse16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlse16.c index 6f6ca153d..5942907a8 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlse16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlse16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlse32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlse32.c index 281cf2829..ba7f6c122 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlse32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlse32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlse64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlse64.c index 774d4ff6a..15d4d2137 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlse64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlse64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e16.c index bad1ff817..90a0735fa 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e16ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e16ff.c index 0af913be8..6a266712e 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e16ff.c +++ 
b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e16ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e32.c index f558c3de0..c3bcbe4a4 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e32ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e32ff.c index b80db315d..0cd7a7fbb 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e32ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e32ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e64.c index 66e67b8c0..b4b357079 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e64ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e64ff.c index 6fa9ac827..ee2988ab5 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e64ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e64ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e8ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e8ff.c index 075fe72de..b7920bb10 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e8ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg2e8ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature 
+experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e16.c index 26f14a608..68630705d 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e16ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e16ff.c index 8dafe1b68..f73df02fe 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e16ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e16ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e32.c index 1bd7bbdeb..a4908ccd4 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e32ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e32ff.c index 8c92a0ce2..596cb62db 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e32ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e32ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e64.c index ed63c34cc..808d921b1 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git 
a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e64ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e64ff.c index c3a49b399..0e32afca7 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e64ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e64ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e8ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e8ff.c index 793a75fd5..5679e02fc 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e8ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg3e8ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e16.c index 07ffe2ed9..8bd1880c0 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e16ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e16ff.c index abb48b7a6..e31d52f53 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e16ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e16ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e32.c index ce0484ffc..d3c7a215a 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e32ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e32ff.c index dd673e4d8..1a677edaf 100644 --- 
a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e32ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e32ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e64.c index 8ed6f2a53..ff031cb79 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e64ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e64ff.c index 84dbf9886..490f8e50e 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e64ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e64ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e8ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e8ff.c index fe0b21e61..4d06a608a 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e8ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg4e8ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e16.c index c10a479d6..a90f68010 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e16ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e16ff.c index fa489e9cb..77acd1f59 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e16ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e16ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 
-target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e32.c index f3cccdd2b..08caaa083 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e32ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e32ff.c index c12aaeff3..555dba2d4 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e32ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e32ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e64.c index 8a1f4a2e0..1959c266e 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e64ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e64ff.c index 2f1b00880..007215bf4 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e64ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e64ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e8ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e8ff.c index 978e8bb0c..254b6ad74 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e8ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg5e8ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: 
FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e16.c index 147ee0e06..1c5533746 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e16ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e16ff.c index a3a9eb43f..bcd616b39 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e16ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e16ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e32.c index ee038c04f..ac212476c 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e32ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e32ff.c index 6e60793a1..e394a7018 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e32ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e32ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e64.c index 8b9f8d031..80885eaf7 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e64ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e64ff.c index 87fbefe64..693a01220 100644 --- 
a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e64ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e64ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e8ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e8ff.c index 13774adc9..ec2d2b477 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e8ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg6e8ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e16.c index 80e77ccc9..8dc5c325d 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e16ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e16ff.c index 44579d3c4..738b94ad7 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e16ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e16ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e32.c index c01fea22f..0fb9776ab 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e32ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e32ff.c index b6d825bf2..1050dab06 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e32ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e32ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 
-target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e64.c index afa53e4ed..fcb433e79 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e64ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e64ff.c index 8f3e9f5c3..99bb3e073 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e64ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e64ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e8ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e8ff.c index 7def83e8d..bc4edf438 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e8ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg7e8ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e16.c index e7fbced6b..9344c1426 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e16ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e16ff.c index cf1d563d6..f84a3f990 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e16ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e16ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: 
FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e32.c index 6a41f6295..27a08d539 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e32ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e32ff.c index 2ec917025..304c902fd 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e32ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e32ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e64.c index 0bc7decfb..d9d28bacf 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e64ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e64ff.c index c1e255a6e..918b4b8e7 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e64ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e64ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e8ff.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e8ff.c index 3d50031bb..feb9a5f1e 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e8ff.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlseg8e8ff.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg2e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg2e16.c index a0e1396ab..570f2dbfe 100644 --- 
a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg2e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg2e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg2e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg2e32.c index b0bf3a7ca..03d192acd 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg2e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg2e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg2e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg2e64.c index d21d05482..6faddbcea 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg2e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg2e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg3e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg3e16.c index 94cf3b903..d80e838cf 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg3e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg3e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg3e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg3e32.c index 6ddda2f0d..ffe819f1e 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg3e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg3e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg3e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg3e64.c index 9ef6f640d..923934939 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg3e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg3e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 
-target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg4e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg4e16.c index 271a37d0e..651e263f5 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg4e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg4e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg4e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg4e32.c index 17a27d148..e8ab6dae8 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg4e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg4e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg4e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg4e64.c index cb2b52177..c9d6bdfd6 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg4e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg4e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg5e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg5e16.c index 57c3bc501..f3ffd10a1 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg5e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg5e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg5e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg5e32.c index 616d71d37..b14223d91 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg5e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg5e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: 
FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg5e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg5e64.c index 1e09d5609..10e896cad 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg5e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg5e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg6e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg6e16.c index 8665cc75a..7b33060b0 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg6e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg6e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg6e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg6e32.c index 01f488283..c757ab332 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg6e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg6e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg6e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg6e64.c index 4de71167f..13580c9b4 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg6e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg6e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg7e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg7e16.c index 64565ce47..ac22bb90b 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg7e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg7e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg7e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg7e32.c index 9bfc6bd86..a5c204c42 100644 --- 
a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg7e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg7e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg7e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg7e64.c index 2d8537f74..696029072 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg7e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg7e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg8e16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg8e16.c index 4b461fc8e..e4f98578a 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg8e16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg8e16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg8e32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg8e32.c index 107afa848..927a0047d 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg8e32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg8e32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg8e64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg8e64.c index 4b8ad78ef..6e6bd3f4b 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg8e64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vlsseg8e64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei16.c index 899305333..aa6d6bb1c 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature 
+v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei32.c index 1292e0cd5..1baca4f94 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei64.c index 99204ac08..ca77cb40c 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei8.c index fa6508e05..10f1a1fd9 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei16.c index feb68e37f..d8170c810 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei32.c index 7856827a9..adf15ee83 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck 
--check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei64.c index f349c8557..737faa8a8 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei8.c index f9b20297b..6872ecba1 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg2ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei16.c index 12be9a2ef..309a72d9b 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei32.c index f0cc7728f..0a443d5a3 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei64.c index f133a43ae..b06a9b1d9 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei8.c index 
15fee21d0..021e78325 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg3ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei16.c index 0887b0777..4708aefb1 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei32.c index ac3e8fa87..e8d15d0be 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei64.c index e8441c0fa..802437de9 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei8.c index da4921663..c7dd73cb8 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg4ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei16.c index 75b9ff94a..772c92166 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei16.c @@ -1,6 +1,6 @@ // 
REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei32.c index d6a1ce318..07ff1c519 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei64.c index d81903000..4e5891f03 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei8.c index 0c810f109..44df0e290 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg5ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei16.c index 408f1266d..7438ad5f9 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei32.c index 3cfb61b22..50452c32c 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: 
-target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei64.c index ce0bc0ba8..d1d5f6629 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei8.c index c2b84f581..52f7c8a88 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg6ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei16.c index 51f2a3de6..8c35f499f 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei32.c index 81e2baea3..b2f3a6411 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei64.c index 859395b33..8274b160f 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git 
a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei8.c index a6e1f5e0b..2d0c4ec10 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg7ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei16.c index fe54c48ed..bef231c8f 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei32.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei32.c index 6b1dce928..8a6cafe4c 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei32.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei32.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei64.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei64.c index e9d3285d6..22b164947 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei64.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei64.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei8.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei8.c index 89aa06500..423527a7c 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei8.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vluxseg8ei8.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmacc.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmacc.c index 58a8328ce..a82504f14 100644 --- 
a/auto-generated/policy_funcs/llvm-overloaded-tests/vmacc.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmacc.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmadd.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmadd.c index b5e4ff356..fc2e9768d 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmadd.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmadd.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmerge.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmerge.c index afd49ee32..1dc48c162 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmerge.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmerge.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmfeq.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmfeq.c index 01b3634e3..acfa17f71 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmfeq.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmfeq.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmfge.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmfge.c index 5ab824dc0..367e75037 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmfge.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmfge.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmfgt.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmfgt.c index 9e131cf80..d16d9440a 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmfgt.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmfgt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// 
RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmfle.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmfle.c index a58fbf3c6..e77c6a7c4 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmfle.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmfle.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmflt.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmflt.c index a73ba48d5..4accb49b4 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmflt.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmflt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmfne.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmfne.c index 96812be07..71d7b82db 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmfne.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmfne.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmseq.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmseq.c index bf82293e0..ff6da3992 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmseq.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmseq.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsge.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsge.c index 6af191473..0dd52c932 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsge.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsge.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsgeu.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsgeu.c index 45b51317b..f123546be 100644 --- 
a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsgeu.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsgeu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsgt.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsgt.c index 7587e6854..e89a32bf6 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsgt.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsgt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsgtu.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsgtu.c index 786b48b2a..529891165 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsgtu.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsgtu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsle.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsle.c index ee16b7151..fee6e23eb 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsle.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsle.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsleu.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsleu.c index 3be414c02..9b1d91935 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsleu.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsleu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmslt.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmslt.c index 8ee4150dd..e17b3d3cd 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmslt.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmslt.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ 
+// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsltu.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsltu.c index 0c8c03ce5..e8e75648b 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsltu.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsltu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsne.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsne.c index a55c04b4e..7ee879bd3 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmsne.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmsne.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vmv.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vmv.c index 16a4e3bf1..5780f04d1 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vmv.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vmv.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vneg.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vneg.c index c7701b57a..e2f9e7ab6 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vneg.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vneg.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vnmsac.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vnmsac.c index 22186e82a..4e569afac 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vnmsac.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vnmsac.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vnmsub.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vnmsub.c index 39372bbdf..0fe74eee1 100644 --- 
a/auto-generated/policy_funcs/llvm-overloaded-tests/vnmsub.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vnmsub.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vrgather.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vrgather.c index cd49beac3..35cbde176 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vrgather.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vrgather.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vrgatherei16.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vrgatherei16.c index 202934e97..e6d490c4f 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vrgatherei16.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vrgatherei16.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vslidedown.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vslidedown.c index cff482041..f02c08159 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vslidedown.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vslidedown.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vslideup.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vslideup.c index 6b8627c36..d9a17dc8b 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vslideup.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vslideup.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vwmacc.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vwmacc.c index 36da79b9b..a3c628743 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vwmacc.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vwmacc.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh 
\ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vwmaccsu.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vwmaccsu.c index 5f62ac3e5..758aeecf0 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vwmaccsu.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vwmaccsu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vwmaccu.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vwmaccu.c index 4de89dc83..dcfae3248 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vwmaccu.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vwmaccu.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/llvm-overloaded-tests/vwmaccus.c b/auto-generated/policy_funcs/llvm-overloaded-tests/vwmaccus.c index 6f0468ccf..61fbd21bb 100644 --- a/auto-generated/policy_funcs/llvm-overloaded-tests/vwmaccus.c +++ b/auto-generated/policy_funcs/llvm-overloaded-tests/vwmaccus.c @@ -1,6 +1,6 @@ // REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s diff --git a/auto-generated/policy_funcs/overloaded_intrinsic_funcs.adoc b/auto-generated/policy_funcs/overloaded_intrinsic_funcs.adoc index 33d43db18..aa9efac4b 100644 --- a/auto-generated/policy_funcs/overloaded_intrinsic_funcs.adoc +++ b/auto-generated/policy_funcs/overloaded_intrinsic_funcs.adoc @@ -51495,6 +51495,7 @@ vuint64m8_t __riscv_vmv_v_tu(vuint64m8_t vd, uint64_t rs1, size_t vl); [[policy-variant-overloadedvector-single-width-saturating-add-and-subtract]] ==== Vector Single-Width Saturating Add and Subtract Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -54335,6 +54336,7 @@ vuint64m8_t __riscv_vasubu_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, [[policy-variant-overloadedvector-single-width-fractional-multiply-with-rounding-and-saturation]] ==== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -55411,6 +55413,7 @@ vuint64m8_t __riscv_vssrl_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, [[policy-variant-overloadedvector-narrowing-fixed-point-clip]] ==== Vector Narrowing Fixed-Point Clip Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value.
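The note added above has a practical consequence that is easy to miss: because these fixed-point intrinsics leave `vxsat` UNSPECIFIED, a caller cannot read the CSR afterwards to detect saturation. A minimal sketch of the implication, assuming the overloaded tail-undisturbed policy intrinsic `__riscv_vsadd_tu` defined elsewhere in this specification:

[,c]
----
#include <riscv_vector.h>

// Saturating add with the tail-undisturbed policy. After the call the
// vxsat CSR holds an UNSPECIFIED value, so this function must not read
// it to learn whether any element saturated; if that information is
// needed, detect saturation by comparing inputs and outputs instead.
vint32m1_t saturating_add(vint32m1_t vd, vint32m1_t vs2, vint32m1_t vs1,
                          size_t vl) {
  return __riscv_vsadd_tu(vd, vs2, vs1, vl);
}
----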
[,c] ---- diff --git a/auto-generated/policy_funcs/overloaded_intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc b/auto-generated/policy_funcs/overloaded_intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc index db2c21119..f1570468b 100644 --- a/auto-generated/policy_funcs/overloaded_intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc +++ b/auto-generated/policy_funcs/overloaded_intrinsic_funcs/03_vector_fixed-point_arithmetic_intrinsics.adoc @@ -3,6 +3,7 @@ [[policy-variant-overloadedvector-single-width-saturating-add-and-subtract]] ==== Vector Single-Width Saturating Add and Subtract Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -2843,6 +2844,7 @@ vuint64m8_t __riscv_vasubu_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, [[policy-variant-overloadedvector-single-width-fractional-multiply-with-rounding-and-saturation]] ==== Vector Single-Width Fractional Multiply with Rounding and Saturation Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- @@ -3919,6 +3921,7 @@ vuint64m8_t __riscv_vssrl_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, [[policy-variant-overloadedvector-narrowing-fixed-point-clip]] ==== Vector Narrowing Fixed-Point Clip Intrinsics +After executing an intrinsic in this section, the `vxsat` CSR assumes an UNSPECIFIED value. [,c] ---- diff --git a/auto-generated/vector-crypto/api-testing/vaesdf.c b/auto-generated/vector-crypto/api-testing/vaesdf.c new file mode 100644 index 000000000..e12a3719a --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaesdf.c @@ -0,0 +1,93 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return
__riscv_vaesdf_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vaesdm.c b/auto-generated/vector-crypto/api-testing/vaesdm.c new file mode 100644 index 000000000..bac6d0a06 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaesdm.c @@ -0,0 +1,93 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t
vl) { + return __riscv_vaesdm_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vaesef.c b/auto-generated/vector-crypto/api-testing/vaesef.c new file mode 100644 index 000000000..72255ee9e --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaesef.c @@ -0,0 +1,93 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vaesem.c b/auto-generated/vector-crypto/api-testing/vaesem.c new file mode 100644 index 000000000..ce7186cb4 --- /dev/null +++
b/auto-generated/vector-crypto/api-testing/vaesem.c @@ -0,0 +1,93 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vaeskf1.c b/auto-generated/vector-crypto/api-testing/vaeskf1.c new file mode 100644 index 000000000..8bb210aa3 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaeskf1.c @@ -0,0 +1,22 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m4(vs2,
0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m8(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vaeskf2.c b/auto-generated/vector-crypto/api-testing/vaeskf2.c new file mode 100644 index 000000000..5d26a5400 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaeskf2.c @@ -0,0 +1,23 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32mf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m1(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m4(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m8(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vaesz.c b/auto-generated/vector-crypto/api-testing/vaesz.c new file mode 100644 index 000000000..d344914e8 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaesz.c @@ -0,0 +1,72 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vandn.c b/auto-generated/vector-crypto/api-testing/vandn.c new file mode 100644 index
000000000..0c7002f3a --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vandn.c @@ -0,0 +1,401 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2,
vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_m(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf8_m(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_m(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf4_m(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_m(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf2_m(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m1_m(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m1_m(vm, vs2, rs1, vl); +} + +vuint8m2_t 
test_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m2_m(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m2_m(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m4_m(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m4_m(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m8_m(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m8_m(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t 
test_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vbrev.c b/auto-generated/vector-crypto/api-testing/vbrev.c new file mode 100644 index 000000000..c59fa4d04 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vbrev.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4(vs2, vl); +} +
+vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t 
test_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vbrev8.c b/auto-generated/vector-crypto/api-testing/vbrev8.c new file mode 100644 index 000000000..ba8c19cfc --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vbrev8.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return
__riscv_vbrev8_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vclmul.c b/auto-generated/vector-crypto/api-testing/vclmul.c new file mode 100644 index 000000000..b04735718 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vclmul.c @@ -0,0 +1,74 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl)
{ + return __riscv_vclmul_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8(vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vclmulh.c b/auto-generated/vector-crypto/api-testing/vclmulh.c new file mode 100644 index 000000000..9a6abaea4 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vclmulh.c @@ -0,0 +1,74 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8(vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return
__riscv_vclmulh_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vclz.c b/auto-generated/vector-crypto/api-testing/vclz.c new file mode 100644 index 000000000..fc05c6572 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vclz.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t
vs2, size_t vl) { + return __riscv_vclz_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vcpop.c b/auto-generated/vector-crypto/api-testing/vcpop.c new file mode 100644 index 000000000..b0c500152 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vcpop.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { +
return __riscv_vcpop_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t 
test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vctz.c b/auto-generated/vector-crypto/api-testing/vctz.c new file mode 100644 index 000000000..7635ae51e --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vctz.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
size_t vl) { + return __riscv_vctz_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vghsh.c b/auto-generated/vector-crypto/api-testing/vghsh.c new file mode 100644 index 000000000..3642b6179 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vghsh.c @@ -0,0 +1,27 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd,
vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vgmul.c b/auto-generated/vector-crypto/api-testing/vgmul.c new file mode 100644 index 000000000..684fac34f --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vgmul.c @@ -0,0 +1,23 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vgmul_vv_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vrev8.c b/auto-generated/vector-crypto/api-testing/vrev8.c new file mode 100644 index 000000000..06acaaba5 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vrev8.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4(vs2,
vl); +} + +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vrol.c b/auto-generated/vector-crypto/api-testing/vrol.c new file mode 100644 index 000000000..5fd4b2c37 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vrol.c @@ -0,0 +1,401 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1,
size_t vl) { + return __riscv_vrol_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, 
vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf8_m(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf8_m(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf4_m(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf4_m(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf2_m(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf2_m(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m1_m(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m1_m(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m2_m(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m2_m(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m4_m(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m4_m(vm, vs2, rs1, vl); +} + +vuint8m8_t 
test_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m8_m(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m8_m(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t 
rs1, + size_t vl) { + return __riscv_vrol_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vror.c b/auto-generated/vector-crypto/api-testing/vror.c new file mode 100644 index 000000000..56e23b2a6 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vror.c @@ -0,0 +1,401 @@ +#include +#include + +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4(vs2, 
rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) 
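+// Hand-added note (not auto-generated): vror rotates each element of vs2 to
+// the right; in the _vx forms only the low log2(SEW) bits of the scalar rs1
+// take effect, so the rotate amount here is effectively rs1 % 64.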
{ + return __riscv_vror_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf8_m(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf8_m(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf4_m(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf4_m(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf2_m(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf2_m(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m1_m(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m1_m(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m2_m(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m2_m(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m4_m(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m4_m(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m8_m(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m8_m(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t 
vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vsha2ch.c b/auto-generated/vector-crypto/api-testing/vsha2ch.c new file mode 100644 index 000000000..53fcaf393 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsha2ch.c @@ -0,0 +1,47 @@ +#include +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2(vd, 
vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vsha2cl.c b/auto-generated/vector-crypto/api-testing/vsha2cl.c new file mode 100644 index 000000000..d8d72d0c0 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsha2cl.c @@ -0,0 +1,47 @@ +#include +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vsha2ms.c b/auto-generated/vector-crypto/api-testing/vsha2ms.c new file mode 100644 index 000000000..bd1e17413 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsha2ms.c @@ -0,0 +1,47 @@ +#include +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return 
__riscv_vsha2ms_vv_u32m1(vd, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2,
+                                  vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m2(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2,
+                                  vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m4(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2,
+                                  vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u32m8(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2,
+                                  vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u64m1(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2,
+                                  vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u64m2(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2,
+                                  vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u64m4(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2,
+                                  vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms_vv_u64m8(vd, vs2, vs1, vl);
+}
diff --git a/auto-generated/vector-crypto/api-testing/vsm3c.c b/auto-generated/vector-crypto/api-testing/vsm3c.c
new file mode 100644
index 000000000..4baa33693
--- /dev/null
+++ b/auto-generated/vector-crypto/api-testing/vsm3c.c
@@ -0,0 +1,23 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
+vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                  size_t vl) {
+  return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32m1(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32m2(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32m4(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl);
+}
diff --git a/auto-generated/vector-crypto/api-testing/vsm3me.c b/auto-generated/vector-crypto/api-testing/vsm3me.c
new file mode 100644
index 000000000..790f7e5b0
--- /dev/null
+++ b/auto-generated/vector-crypto/api-testing/vsm3me.c
@@ -0,0 +1,23 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
+vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vsm3me_vv_u32mf2(vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32m1(vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32m2(vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32m4(vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsm3me_vv_u32m8(vs2, vs1, vl);
+}
diff --git a/auto-generated/vector-crypto/api-testing/vsm4k.c b/auto-generated/vector-crypto/api-testing/vsm4k.c
new file mode 100644
index 000000000..739baa15e
--- /dev/null
+++ b/auto-generated/vector-crypto/api-testing/vsm4k.c
@@ -0,0 +1,22 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
+vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32mf2(vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32m1(vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32m2(vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32m4(vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4k_vi_u32m8(vs2, 0, vl);
+}
diff --git a/auto-generated/vector-crypto/api-testing/vsm4r.c b/auto-generated/vector-crypto/api-testing/vsm4r.c
new file mode 100644
index 000000000..069127b7b
--- /dev/null
+++ b/auto-generated/vector-crypto/api-testing/vsm4r.c
@@ -0,0 +1,93 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
+vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                  size_t vl) {
+  return __riscv_vsm4r_vv_u32mf2(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                         size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32mf2(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2,
+                                       size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2,
+                                       size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2,
+                                       size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2,
+                                       size_t vl) {
+  return __riscv_vsm4r_vs_u32mf2_u32m8(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32m1(vd, vs2, vl);
+}
+
+vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2,
+                                      size_t vl) {
+  return __riscv_vsm4r_vs_u32m1_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2,
+                                      size_t vl) {
+  return __riscv_vsm4r_vs_u32m1_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2,
+                                      size_t vl) {
+  return __riscv_vsm4r_vs_u32m1_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2,
+                                      size_t vl) {
+  return __riscv_vsm4r_vs_u32m1_u32m8(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32m2(vd, vs2, vl);
+}
+
+vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2,
+                                      size_t vl) {
+  return __riscv_vsm4r_vs_u32m2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2,
+                                      size_t vl) {
+  return __riscv_vsm4r_vs_u32m2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2,
+                                      size_t vl) {
+  return __riscv_vsm4r_vs_u32m2_u32m8(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32m4(vd, vs2, vl);
+}
+
+vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2,
+                                      size_t vl) {
+  return __riscv_vsm4r_vs_u32m4_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2,
+                                      size_t vl) {
+  return __riscv_vsm4r_vs_u32m4_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm4r_vv_u32m8(vd, vs2, vl);
+}
diff --git a/auto-generated/vector-crypto/api-testing/vwsll.c b/auto-generated/vector-crypto/api-testing/vwsll.c
new file mode 100644
index 000000000..acfadeff7
--- /dev/null
+++ b/auto-generated/vector-crypto/api-testing/vwsll.c
@@ -0,0 +1,273 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
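+// Hand-written usage sketch, not part of the auto-generated tests (the
+// helper name below is illustrative only). vwsll is the Zvbb widening
+// shift-left: each element of vs2 is zero-extended to 2*SEW bits before the
+// shift, so shift amounts up to SEW lose no bits.
+static inline vuint16m2_t example_vwsll_u8_to_u16(vuint8m1_t vs2, size_t vl) {
+  // Each 8-bit lane a yields the 16-bit value (uint16_t)a << 4.
+  return __riscv_vwsll_vx_u16m2(vs2, 4, vl);
+}
+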
+vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t 
test_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + 
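+  // Hand-added note: the _m suffix prepends a mask operand vm; these
+  // non-policy tests exercise the default TAMA (tail-agnostic, mask-agnostic)
+  // behavior, so inactive destination elements are unspecified.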
return __riscv_vwsll_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/intrinsic_funcs.adoc b/auto-generated/vector-crypto/intrinsic_funcs.adoc new file mode 100644 index 000000000..cb14f0942 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs.adoc @@ -0,0 +1,1288 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t 
__riscv_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, + size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, + size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, + size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, + size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, + size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, + size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, + size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t 
__riscv_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, + size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, + size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, + size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, + size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl); 
+vuint32mf2_t __riscv_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t 
__riscv_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t 
__riscv_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8(vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8(vuint8m8_t vs2, size_t vl); 
+vuint16mf4_t __riscv_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8(vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t 
vl); +vuint16m4_t __riscv_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_m(vbool32_t vm, 
vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1(vuint64m1_t vs2, 
vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t 
vl); +vuint64m2_t __riscv_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + 
size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); 
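+// --- Illustrative usage sketch, not part of the generated listing ---
+// Assumes <riscv_vector.h>; the helper name is hypothetical. A fixed
+// rotate amount maps onto the _vx form; the masked _m variants rotate
+// only the elements whose mask bit is set (inactive elements follow the
+// default mask-agnostic policy).
+static inline vuint32m1_t rotl16_masked(vbool32_t vm, vuint32m1_t v,
+                                        size_t vl) {
+  return __riscv_vrol_vx_u32m1_m(vm, v, 16, vl);
+}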
+vuint16m8_t __riscv_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t 
__riscv_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl); +vuint64m8_t 
__riscv_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl); +---- + +=== Zvbc - Vector Carryless Multiplication + +[[]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +---- + +=== Zvkg - Vector GCM/GMAC + +[[]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t 
__riscv_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t 
vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t 
__riscv_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, + size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + size_t uimm, size_t vl); +---- + +[[]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +---- + +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash + +[[]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m2_t 
__riscv_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +---- + +[[]] +==== Vector SHA-2 two rounds of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +---- + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + 
size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +---- + +[[]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, + size_t vl); +vuint32m2_t __riscv_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, + size_t vl); +vuint32m4_t __riscv_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, + size_t vl); +vuint32m8_t __riscv_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, + size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc new file mode 100644 index 000000000..3ea6f28c5 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -0,0 +1,826 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); 
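+// --- Illustrative usage sketch, not part of the generated listing ---
+// Assumes <riscv_vector.h>; the helper name is hypothetical. vandn
+// computes vs1 & ~vs2, so clearing a per-element set of bits is one op.
+static inline vuint8m1_t clear_bits_u8m1(vuint8m1_t value, vuint8m1_t bits,
+                                         size_t vl) {
+  return __riscv_vandn_vv_u8m1(bits, value, vl);  // value & ~bits
+}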
+vuint8m8_t __riscv_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, + size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, + size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, + size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, + size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, + size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_m(vbool2_t vm, 
vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, + size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, + size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, + size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, + size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, + size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, + size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl); 
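+// --- Illustrative usage sketch, not part of the generated listing ---
+// Assumes <riscv_vector.h>; the helper name is hypothetical. vbrev
+// reverses the bit order within each element, vbrev8 within each byte,
+// and vrev8 reverses the bytes of each element, i.e. a vectorized
+// endianness swap.
+static inline vuint32m1_t byteswap_u32m1(vuint32m1_t v, size_t vl) {
+  return __riscv_vrev8_v_u32m1(v, vl);
+}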
+vuint8m1_t __riscv_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl); +vuint32m1_t 
__riscv_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, 
size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1(vuint64m1_t vs2, size_t vl); +vuint64m2_t 
__riscv_vclz_v_u64m2(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8(vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8(vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t 
__riscv_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t 
__riscv_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, 
size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t 
__riscv_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_m(vbool2_t vm, 
vuint16m8_t vs2, size_t rs1, + size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_m(vbool16_t vm, 
vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2(vuint16mf4_t vs2, 
vuint16mf4_t vs1, + size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t 
vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc b/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc new file mode 100644 index 000000000..c241d9c9f --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc @@ -0,0 +1,66 @@ + +=== Zvbc - Vector Carryless Multiplication + +[[]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t 
__riscv_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc b/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc new file mode 100644 index 000000000..6dc612c83 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc @@ -0,0 +1,25 @@ + +=== Zvkg - Vector GCM/GMAC + +[[]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc new file mode 100644 index 000000000..58b7cee3b --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc @@ -0,0 +1,210 @@ + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t 
__riscv_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t 
__riscv_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, + size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + size_t uimm, size_t vl); +---- + +[[]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t 
__riscv_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc b/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc new file mode 100644 index 000000000..1e4e030fc --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc @@ -0,0 +1,70 @@ + +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash + +[[]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +---- + +[[]] +==== Vector SHA-2 two rounds of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_vv_u32mf2(vuint32mf2_t vd, 
vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc b/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc new file mode 100644 index 000000000..799221682 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc @@ -0,0 +1,55 @@ + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git 
a/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc b/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc new file mode 100644 index 000000000..d9f4983af --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc @@ -0,0 +1,36 @@ + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, +                                      size_t vl); +vuint32m1_t __riscv_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, +                                    size_t vl); +vuint32m2_t __riscv_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, +                                    size_t vl); +vuint32m4_t __riscv_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, +                                    size_t vl); +vuint32m8_t __riscv_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, +                                    size_t vl); +---- + +[[]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, +                                     size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, +                                   size_t vl); +vuint32m2_t __riscv_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, +                                   size_t vl); +vuint32m4_t __riscv_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, +                                   size_t vl); +vuint32m8_t __riscv_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, +                                   size_t vl); +---- diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c new file mode 100644 index 000000000..21e8e315d --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c @@ -0,0 +1,101 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN:   -target-feature +zve64x \ +// RUN:   -target-feature +zvkned \ +// RUN:   -target-feature +zvl256b \ +// RUN:   -target-feature +experimental \ +// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN:   FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, +                                   size_t vl) { +  return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, +                                          size_t vl) { +  return __riscv_vaesdf_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, +                                        size_t vl) { +  return __riscv_vaesdf_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, +                                        size_t vl) { +  return __riscv_vaesdf_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, +                                        size_t vl) { +  return __riscv_vaesdf_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, +                                        size_t vl) { +  return __riscv_vaesdf_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { +  return __riscv_vaesdf_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, +                                       size_t vl) { +  return __riscv_vaesdf_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, +                                       size_t vl) { +  return __riscv_vaesdf_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, +                                       size_t vl) { +  return __riscv_vaesdf_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t
test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c new file mode 100644 index 000000000..eca20ba2a --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c @@ -0,0 +1,101 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, 
size_t vl) { + return __riscv_vaesdm_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c new file mode 100644 index 000000000..ba3bcc789 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c @@ -0,0 +1,101 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m2(vd, vs2, 
vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c new file mode 100644 index 000000000..73b616b05 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c @@ -0,0 +1,101 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t 
test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c new file mode 100644 index 000000000..85a704734 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c @@ -0,0 +1,30 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m4(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m8(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c new file mode 100644 index 000000000..f40d4c10e --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c @@ -0,0 +1,31 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32mf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m1(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m4(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m8(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c new file mode 100644 index 000000000..fd0f962f1 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c @@ -0,0 +1,80 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 
-disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vandn.c b/auto-generated/vector-crypto/llvm-api-tests/vandn.c new file mode 100644 index 000000000..9ffd256d1 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vandn.c @@ -0,0 +1,408 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2(vs2, vs1, vl); +} + 
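+// Usage sketch (hypothetical helper, not one of the auto-generated tests):
+// vandn computes a bitwise and-not, vd[i] = vs2[i] & ~vs1[i]; the .vx form
+// negates the scalar rs1 instead. That makes it a one-instruction
+// "clear these flag bits" primitive:
+static inline vuint8mf2_t clear_flags_u8mf2(vuint8mf2_t flags, uint8_t mask,
+                                            size_t vl) {
+  // The scalar operand is the inverted one, so this computes flags & ~mask.
+  return __riscv_vandn_vx_u8mf2(flags, mask, vl);
+}
+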
+vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t 
test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_m(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf8_m(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_m(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf4_m(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_m(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf2_m(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m1_m(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m1_m(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m2_m(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m2_m(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m4_m(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m4_m(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m8_m(vm, vs2, vs1, vl); +} + +vuint8m8_t 
test_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m8_m(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m8_m(vm, vs2, rs1, vl); +} + 
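+// Masking sketch (hypothetical helper, not one of the auto-generated tests):
+// each _m variant takes a vbool mask whose ratio matches SEW/LMUL (vbool32_t
+// for u32m1) as its first argument and follows the default tail-agnostic,
+// mask-agnostic (TAMA) policy:
+static inline vuint32m1_t clear_flags_u32m1_m(vbool32_t vm, vuint32m1_t flags,
+                                              uint32_t mask, size_t vl) {
+  // Only elements whose mask bit is set are computed; masked-off and tail
+  // elements hold unspecified (agnostic) values.
+  return __riscv_vandn_vx_u32m1_m(vm, flags, mask, vl);
+}
+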
+vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vbrev.c b/auto-generated/vector-crypto/llvm-api-tests/vbrev.c new file mode 100644 index 000000000..40172b4b2 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vbrev.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2(vs2, vl); +} + +vuint32m4_t 
test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c 
b/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c new file mode 100644 index 000000000..27e4b77fc --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return 
__riscv_vbrev8_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclmul.c b/auto-generated/vector-crypto/llvm-api-tests/vclmul.c new file mode 100644 index 000000000..e1b7a953f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vclmul.c @@ -0,0 +1,81 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t 
test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8(vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c b/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c new file mode 100644 index 000000000..96dc0cf1f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c @@ -0,0 +1,81 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8(vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + 
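+  // vclmulh returns the upper 64 bits of the 128-bit carry-less product;
+  // pairing it with vclmul (the lower 64 bits) yields the full GF(2)[x]
+  // multiply used by GHASH- and CRC-style kernels.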
return __riscv_vclmulh_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclz.c b/auto-generated/vector-crypto/llvm-api-tests/vclz.c new file mode 100644 index 000000000..287966d43 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vclz.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + 
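+  // vclz counts leading zero bits per element; an all-zero element yields
+  // SEW (64 for u64m1), mirroring the scalar Zbb convention.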
return __riscv_vclz_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vcpop.c b/auto-generated/vector-crypto/llvm-api-tests/vcpop.c new file mode 100644 index 000000000..c9402938f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vcpop.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// 
RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return 
__riscv_vcpop_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vctz.c b/auto-generated/vector-crypto/llvm-api-tests/vctz.c new file mode 100644 index 000000000..4f78b7c06 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vctz.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { 
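+  // vctz is the trailing-zero count; as with vclz, a zero element returns
+  // SEW (16 for u16m2).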
+ return __riscv_vctz_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t vm, 
vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vghsh.c b/auto-generated/vector-crypto/llvm-api-tests/vghsh.c new file mode 100644 index 000000000..8b5c8bd9e --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vghsh.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vgmul.c b/auto-generated/vector-crypto/llvm-api-tests/vgmul.c new file mode 100644 index 000000000..b7fe95b7e --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vgmul.c @@ -0,0 +1,31 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vgmul_vv_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vrev8.c b/auto-generated/vector-crypto/llvm-api-tests/vrev8.c new file mode 100644 index 000000000..f446fbba0 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vrev8.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN:
-target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_m(vm, vs2, vl); +} + 
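+// Editor's note, not part of the generated file: the _m suffix marks the
+// masked variant, where vm selects the active elements; these tests use the
+// default (TAMA) policy, so inactive elements are agnostic. A minimal usage
+// sketch (swap_even and vidx are hypothetical, not generated tests):
+//
+//   vuint16mf4_t swap_even(vuint16mf4_t x, vuint16mf4_t vidx, size_t vl) {
+//     vbool64_t vm = __riscv_vmseq_vx_u16mf4_b64(
+//         __riscv_vand_vx_u16mf4(vidx, 1, vl), 0, vl); // mask of even indices
+//     return __riscv_vrev8_v_u16mf4_m(vm, x, vl);      // byte-swap where vm is set
+//   }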
+vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vrol.c b/auto-generated/vector-crypto/llvm-api-tests/vrol.c new file mode 100644 index 000000000..75ac4fb6b --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vrol.c @@ -0,0 +1,408 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkb \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t 
test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1(vs2, 
vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf8_m(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf8_m(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf4_m(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf4_m(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf2_m(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf2_m(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m1_m(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m1_m(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m2_m(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m2_m(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m4_m(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m4_m(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m8_m(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m8_m(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + 
return __riscv_vrol_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t 
test_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vror.c b/auto-generated/vector-crypto/llvm-api-tests/vror.c new file mode 100644 index 000000000..a3e2aeb24 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vror.c @@ -0,0 +1,408 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkb \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1(vs2, rs1, vl); +} + 
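+// Editor's note, not part of the generated file: vror rotates each element
+// right, and only the low log2(SEW) bits of the rotate amount are used, so
+// for 16-bit elements the two calls below should produce the same result
+// (a sketch, assuming x and vl are in scope):
+//
+//   vuint16m1_t a = __riscv_vror_vx_u16m1(x, 3, vl);
+//   vuint16m1_t b = __riscv_vror_vx_u16m1(x, 19, vl); // 19 mod 16 == 3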
+vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf8_m(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf8_m(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf4_m(vm, vs2, vs1, vl); +} + +vuint8mf4_t 
test_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf4_m(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf2_m(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf2_m(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m1_m(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m1_m(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m2_m(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m2_m(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m4_m(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m4_m(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m8_m(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m8_m(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return 
__riscv_vror_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c new file mode 100644 index 000000000..1f6588d4d --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c @@ -0,0 +1,55 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1(vd, vs2, vs1, 
vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c new file mode 100644 index 000000000..203c7c95e --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c @@ -0,0 +1,55 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c new file mode 100644 index 000000000..76756a0df --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c @@ -0,0 +1,55 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x 
\ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c b/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c new file mode 100644 index 000000000..e17e45eef --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c @@ -0,0 +1,31 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksh \ +// RUN: -target-feature +zvl512b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m1(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m4(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c b/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c new file mode 100644 index 000000000..41d046bd4 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c @@ -0,0 +1,31 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksh \ +// RUN: -target-feature +zvl512b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + 
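+// Editor's note, not part of the generated file: the SM3 instructions work
+// on 256-bit element groups (eight 32-bit elements), so the fractional-LMUL
+// tests in this file need VLEN >= 512; that is presumably why this RUN line
+// requests +zvl512b while the files whose instructions use 128-bit element
+// groups (vsha2*, vsm4*) request only +zvl256b.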
+#include <riscv_vector.h> + +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vsm3me_vv_u32mf2(vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1(vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2(vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4(vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8(vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c new file mode 100644 index 000000000..da0dfdbed --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c @@ -0,0 +1,30 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m4(vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m8(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c new file mode 100644 index 000000000..44bda79e3 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c @@ -0,0 +1,101 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, 
vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vwsll.c b/auto-generated/vector-crypto/llvm-api-tests/vwsll.c new file mode 100644 index 000000000..a37286747 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vwsll.c @@ -0,0 +1,280 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t 
vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t 
vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t 
vl) { + return __riscv_vwsll_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c new file mode 100644 index 000000000..8d2efcc47 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c @@ -0,0 +1,101 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c new file mode 100644 index 000000000..1daf37f50 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c @@ -0,0 +1,101 @@ +// REQUIRES: riscv-registered-target +// 
RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c new file mode 100644 index 000000000..9d38a49c8 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c @@ -0,0 +1,101 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t 
vs2, + size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c new file mode 100644 index 000000000..a91b0075c --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c @@ -0,0 +1,101 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m2_t 
test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c new file mode 100644 index 000000000..62938b00f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c @@ -0,0 +1,30 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c new file mode 100644 index 000000000..2d13d1171 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c
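/*
 * Editor's note (not generated output): the vaeskf1/vaeskf2 tests only
 * verify that each overload compiles, so a minimal usage sketch may help.
 * It assumes Zvkned with VLEN >= 128, so that vl = 4 covers one 128-bit
 * element group of four uint32_t words; the function and parameter names
 * below are illustrative, not part of this API. vaeskf1 derives AES-128
 * round key i from round key i-1, with the round number as an immediate
 * (vaeskf2 plays the analogous role for the AES-256 schedule).
 */
#include <riscv_vector.h>
#include <stdint.h>

static inline void aes128_first_round_keys(const uint32_t key[4],
                                           uint32_t rk1[4],
                                           uint32_t rk2[4]) {
  size_t vl = __riscv_vsetvl_e32m1(4);           /* one element group */
  vuint32m1_t k0 = __riscv_vle32_v_u32m1(key, vl);
  vuint32m1_t k1 = __riscv_vaeskf1(k0, 1, vl);   /* round-1 key */
  vuint32m1_t k2 = __riscv_vaeskf1(k1, 2, vl);   /* round-2 key */
  __riscv_vse32_v_u32m1(rk1, k1, vl);
  __riscv_vse32_v_u32m1(rk2, k2, vl);            /* rounds 3..10 likewise */
}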
@@ -0,0 +1,31 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c new file mode 100644 index 000000000..80782358f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c @@ -0,0 +1,80 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c 
b/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c new file mode 100644 index 000000000..3f4f7fcad --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c @@ -0,0 +1,408 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} +
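/*
 * Editor's note (not generated output): a hedged usage sketch of the
 * vector-scalar overload exercised in this file. vandn computes
 * vs2 & ~vs1, i.e. the second operand is the one complemented, so
 * broadcasting a scalar mask clears fixed flag bits across a buffer.
 * Requires Zvbb/Zvkb; the helper name and parameters are illustrative
 * assumptions, not part of the generated suite.
 */
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void clear_flag_bits(uint8_t *buf, size_t n, uint8_t flags) {
  for (size_t done = 0; done < n; /* advanced by vl below */) {
    size_t vl = __riscv_vsetvl_e8m8(n - done);   /* strip-mine remainder */
    vuint8m8_t v = __riscv_vle8_v_u8m8(buf + done, vl);
    v = __riscv_vandn(v, flags, vl);             /* v & ~flags */
    __riscv_vse8_v_u8m8(buf + done, v, vl);
    done += vl;
  }
}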
+vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return 
__riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, + 
size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c new file mode 100644 index 000000000..0482ec227 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + 
return __riscv_vbrev(vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c new file mode 100644 index 000000000..46bf35822 --- 
/dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t vm, 
vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c new file mode 100644 index 000000000..c9abd238c --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c @@ -0,0 +1,81 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m1_t 
test_vclmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c new file mode 100644 index 000000000..a5016cf2f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c @@ -0,0 +1,81 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t vm, 
vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c new file mode 100644 index 000000000..92340eb2c --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return 
__riscv_vclz(vm, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c new file mode 100644 index 000000000..7de4d28b5 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m1_t 
test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); 
+} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c new file mode 100644 index 000000000..df3e30371 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return 
__riscv_vctz(vm, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c new file mode 100644 index 000000000..05675405b --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} diff --git 
a/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c new file mode 100644 index 000000000..331cfa03f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c @@ -0,0 +1,31 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c new file mode 100644 index 000000000..5026de6d0 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m8_t 
test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c new file mode 100644 index 000000000..0eb55a7c1 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c @@ -0,0 +1,408 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkb \ +// 
RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + 
return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + 
size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return 
__riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c new file mode 100644 index 000000000..58a524bd5 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c @@ -0,0 +1,408 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkb \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) 
{ + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); 
+} + +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t 
vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c new file mode 100644 index 000000000..77fe9d052 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c @@ -0,0 +1,55 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, 
vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c new file mode 100644 index 000000000..e276391b4 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c @@ -0,0 +1,55 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c new file mode 100644 index 000000000..86ff2dad4 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c @@ -0,0 +1,55 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + 
+vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2,
+                                  vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2,
+                                  vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2,
+                                  vuint32m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2,
+                                  vuint64m1_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2,
+                                  vuint64m2_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2,
+                                  vuint64m4_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2,
+                                  vuint64m8_t vs1, size_t vl) {
+  return __riscv_vsha2ms(vd, vs2, vs1, vl);
+}
diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c
new file mode 100644
index 000000000..6bd7e91ee
--- /dev/null
+++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c
@@ -0,0 +1,31 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \
+// RUN:   -target-feature +zve64x \
+// RUN:   -target-feature +zvksh \
+// RUN:   -target-feature +zvl512b \
+// RUN:   -target-feature +experimental \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                  size_t vl) {
+  return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vsm3c(vd, vs2, 0, vl);
+}
+
+vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vsm3c(vd, vs2, 0, vl);
+}
diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c
new file mode 100644
index 000000000..71c9ffd48
--- /dev/null
+++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c
@@ -0,0 +1,31 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \
+// RUN:   -target-feature +zve64x \
+// RUN:   -target-feature +zvksh \
+// RUN:   -target-feature +zvl512b \
+// RUN:   -target-feature +experimental \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vsm3me(vs2, vs1, vl);
+}
+
+vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return
__riscv_vsm3me(vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c new file mode 100644 index 000000000..3392f6b31 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c @@ -0,0 +1,30 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c new file mode 100644 index 000000000..9bd6eb604 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c @@ -0,0 +1,101 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + 
+vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c new file mode 100644 index 000000000..f0c50a31b --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c @@ -0,0 +1,280 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} 
+ +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return 
__riscv_vwsll(vm, vs2, vs1, vl);
+}
+
+vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1,
+                                  size_t vl) {
+  return __riscv_vwsll(vm, vs2, rs1, vl);
+}
+
+vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2,
+                                  vuint16m1_t vs1, size_t vl) {
+  return __riscv_vwsll(vm, vs2, vs1, vl);
+}
+
+vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1,
+                                  size_t vl) {
+  return __riscv_vwsll(vm, vs2, rs1, vl);
+}
+
+vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vwsll(vm, vs2, vs1, vl);
+}
+
+vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1,
+                                  size_t vl) {
+  return __riscv_vwsll(vm, vs2, rs1, vl);
+}
+
+vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vwsll(vm, vs2, vs1, vl);
+}
+
+vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1,
+                                  size_t vl) {
+  return __riscv_vwsll(vm, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2,
+                                  vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vwsll(vm, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1,
+                                  size_t vl) {
+  return __riscv_vwsll(vm, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2,
+                                  vuint32m1_t vs1, size_t vl) {
+  return __riscv_vwsll(vm, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1,
+                                  size_t vl) {
+  return __riscv_vwsll(vm, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2,
+                                  vuint32m2_t vs1, size_t vl) {
+  return __riscv_vwsll(vm, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1,
+                                  size_t vl) {
+  return __riscv_vwsll(vm, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vwsll(vm, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1,
+                                  size_t vl) {
+  return __riscv_vwsll(vm, vs2, rs1, vl);
+}
diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c
new file mode 100644
index 000000000..89ac4ddae
--- /dev/null
+++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c
@@ -0,0 +1,93 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
+vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                   size_t vl) {
+  return __riscv_vaesdf_vv(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                          size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2,
+                                        size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2,
+                                        size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2,
+                                        size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2,
+                                        size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdf_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv(vd, vs2, vl);
+}
diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c
new file mode 100644
index 000000000..25d8104c5
--- /dev/null
+++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c
@@ -0,0 +1,93 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
+vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                   size_t vl) {
+  return __riscv_vaesdm_vv(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                          size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2,
+                                        size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2,
+                                        size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2,
+                                        size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2,
+                                        size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2,
+                                       size_t vl) {
+  return __riscv_vaesdm_vs(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2,
vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c new file mode 100644 index 000000000..32325d1f4 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c @@ -0,0 +1,93 @@ +#include +#include + +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); 
+} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c new file mode 100644 index 000000000..4a52fd90f --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c @@ -0,0 +1,93 @@ +#include +#include + +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c new file mode 100644 index 000000000..4bc62bb00 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return 
__riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c new file mode 100644 index 000000000..eb6a9d751 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c @@ -0,0 +1,23 @@ +#include +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c new file mode 100644 index 000000000..7f7c1d721 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c @@ -0,0 +1,72 @@ +#include +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vandn.c b/auto-generated/vector-crypto/overloaded-api-testing/vandn.c new file mode 100644 index 000000000..6a05a684e --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vandn.c @@ -0,0 +1,401 @@ +#include +#include + +vuint8mf8_t 
test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, 
uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + 
return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t vm, 
vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c b/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c new file mode 100644 index 000000000..624410303 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vm, 
vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c b/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c new file mode 100644 index 000000000..9d2e33a81 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16mf4_t 
test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return 
__riscv_vbrev8(vm, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c b/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c new file mode 100644 index 000000000..8475b9416 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c @@ -0,0 +1,74 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c b/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c new file mode 100644 index 000000000..a2f4a724a --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c @@ -0,0 +1,74 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m1_t 
test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vclz.c b/auto-generated/vector-crypto/overloaded-api-testing/vclz.c new file mode 100644 index 000000000..9625fdaaf --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vclz.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t 
vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} diff --git 
a/auto-generated/vector-crypto/overloaded-api-testing/vcpop.c b/auto-generated/vector-crypto/overloaded-api-testing/vcpop.c new file mode 100644 index 000000000..520ad7292 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vcpop.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); 
+} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vctz.c b/auto-generated/vector-crypto/overloaded-api-testing/vctz.c new file mode 100644 index 000000000..ed4a213cc --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vctz.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m8_t 
test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c b/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c new file mode 100644 index 000000000..59ef64466 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c @@ -0,0 +1,27 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t 
vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c b/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c new file mode 100644 index 000000000..e78e620c3 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c @@ -0,0 +1,23 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c b/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c new file mode 100644 index 000000000..1d76a3980 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + 
+vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vrol.c b/auto-generated/vector-crypto/overloaded-api-testing/vrol.c new file mode 100644 index 000000000..31c6af020 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vrol.c @@ -0,0 +1,401 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, 
vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, 
vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, 
vl); +} + +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16mf4_t 
test_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t vm, 
vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vror.c b/auto-generated/vector-crypto/overloaded-api-testing/vror.c new file mode 100644 index 000000000..f4398c744 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vror.c @@ -0,0 +1,401 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m4_t
test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + 
return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + 
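/*
 * Illustrative aside (not part of the generated test sources): a minimal
 * sketch of the vx form of __riscv_vror tested in this file, assuming a
 * Zvbb-enabled toolchain. The function name ror7_buffer and the rotate
 * amount are hypothetical; constant per-element rotates like this one are
 * the building block Zvbb offers for ARX-style cipher kernels.
 */
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void ror7_buffer(uint32_t *buf, size_t n) {
  for (size_t vl; n > 0; n -= vl, buf += vl) {
    vl = __riscv_vsetvl_e32m4(n);                   // strip-mine the loop
    vuint32m4_t v = __riscv_vle32_v_u32m4(buf, vl);
    v = __riscv_vror(v, 7, vl);                     // rotate each element right by 7
    __riscv_vse32_v_u32m4(buf, v, vl);
  }
}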
+vuint32m4_t test_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c new file mode 100644 index 000000000..492101429 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c @@ -0,0 +1,47 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c b/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c new file mode 100644 index 000000000..8a9124df0 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c @@ -0,0 +1,47 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd,
vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c new file mode 100644 index 000000000..f4532bec8 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c @@ -0,0 +1,47 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c new file mode 100644 index 000000000..d7b5971af --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c @@ -0,0 +1,23 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd,
vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c new file mode 100644 index 000000000..f0dfdd5cb --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c @@ -0,0 +1,23 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c new file mode 100644 index 000000000..2f64557c9 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c @@ -0,0 +1,22 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c new file mode 100644 index 000000000..56a4e08d9 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c @@ -0,0 +1,93 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return
__riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c b/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c new file mode 100644 index 000000000..3ccf51ea9 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c @@ -0,0 +1,273 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return
__riscv_vwsll(vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return 
__riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.adoc new file mode 100644 index 000000000..0dc87cdce --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.adoc @@ -0,0 +1,1098 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[overloaded-]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn(vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn(vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn(vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn(vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t 
__riscv_vandn(vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn(vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn(vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn(vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn(vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn(vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn(vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn(vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn(vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn(vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn(vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn(vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn(vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn(vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn(vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn(vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn(vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn(vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vandn(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, + size_t vl); +vuint8mf4_t __riscv_vandn(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vandn(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, + size_t vl); +vuint8mf2_t __riscv_vandn(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vandn(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, + size_t vl); +vuint8m1_t __riscv_vandn(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vandn(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vandn(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vandn(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t 
__riscv_vandn(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vandn(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vandn(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, + size_t vl); +vuint16mf2_t __riscv_vandn(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vandn(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, + size_t vl); +vuint16m1_t __riscv_vandn(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vandn(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, + size_t vl); +vuint16m2_t __riscv_vandn(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vandn(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, + size_t vl); +vuint16m4_t __riscv_vandn(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vandn(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, + size_t vl); +vuint16m8_t __riscv_vandn(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vandn(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, + size_t vl); +vuint32mf2_t __riscv_vandn(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vandn(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, + size_t vl); +vuint32m1_t __riscv_vandn(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vandn(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, + size_t vl); +vuint32m2_t __riscv_vandn(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vandn(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, + size_t vl); +vuint32m4_t __riscv_vandn(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vandn(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, + size_t vl); +vuint32m8_t __riscv_vandn(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vandn(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, + size_t vl); +vuint64m1_t __riscv_vandn(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vandn(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vandn(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vandn(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vandn(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vandn(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vandn(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vandn(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev(vuint16m4_t vs2, size_t vl); +vuint16m8_t 
__riscv_vbrev(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev(vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8(vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8(vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t 
__riscv_vbrev(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev(vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8(vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8(vbool64_t vm, vuint32mf2_t vs2, size_t 
vl); +vuint32m1_t __riscv_vrev8(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8(vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz(vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz(vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz(vbool1_t vm, vuint8m8_t vs2, size_t 
vl); +vuint16mf4_t __riscv_vclz(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz(vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz(vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop(vuint32m2_t vs2, size_t vl); +vuint32m4_t 
__riscv_vcpop(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop(vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop(vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol(vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol(vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol(vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol(vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol(vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol(vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol(vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol(vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol(vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol(vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol(vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol(vuint16m4_t vs2, vuint16m4_t vs1, 
size_t vl); +vuint16m4_t __riscv_vrol(vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol(vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol(vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol(vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol(vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol(vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol(vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol(vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol(vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol(vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror(vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror(vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror(vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror(vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror(vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror(vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror(vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror(vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror(vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror(vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror(vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror(vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror(vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror(vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror(vuint32m1_t vs2, size_t rs1, size_t vl); 
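+// Editor's illustration, not generator output: a minimal usage sketch assuming
+// a vuint32m1_t value `data` and an active vector length `vl` are in scope.
+//   vuint32m1_t r = __riscv_vror(data, 8, vl);  // rotate each 32-bit element right by 8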
+vuint32m2_t __riscv_vror(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror(vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror(vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror(vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror(vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror(vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror(vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vrol(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vrol(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vrol(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vrol(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vrol(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vrol(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vrol(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vrol(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vrol(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vrol(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vrol(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vrol(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vrol(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vrol(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol(vbool16_t vm, 
vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vrol(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vrol(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vrol(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vrol(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vrol(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vrol(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vrol(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vror(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vror(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vror(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vror(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vror(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vror(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vror(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vror(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vror(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vror(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vror(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vror(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vror(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t 
vs1, + size_t vl); +vuint32m1_t __riscv_vror(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vror(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vror(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vror(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vror(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vror(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vror(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vror(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll(vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll(vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll(vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll(vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll(vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll(vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll(vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll(vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll(vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll(vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll(vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll(vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll(vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll(vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll(vbool64_t 
vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint16mf4_t __riscv_vwsll(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vwsll(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint16mf2_t __riscv_vwsll(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vwsll(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint16m1_t __riscv_vwsll(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint16m2_t __riscv_vwsll(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint16m4_t __riscv_vwsll(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint16m8_t __riscv_vwsll(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint32mf2_t __riscv_vwsll(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vwsll(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vwsll(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vwsll(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vwsll(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vwsll(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vwsll(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint64m1_t __riscv_vwsll(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vwsll(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vwsll(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vwsll(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vwsll(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +---- + +=== Zvbc - Vector Carryless Multiplication + +[[overloaded-]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul(vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul(vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul(vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul(vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh(vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh(vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t 
__riscv_vclmulh(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh(vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh(vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmul(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmul(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmul(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmul(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmul(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmul(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmul(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl); +vuint64m1_t __riscv_vclmulh(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmulh(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmulh(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmulh(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmulh(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmulh(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmulh(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmulh(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl); +---- + +=== Zvkg - Vector GCM/GMAC + +[[overloaded-]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vghsh(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vghsh(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vghsh(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vghsh(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32mf2_t __riscv_vgmul(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[overloaded-]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t 
__riscv_vaesef_vs(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs(vuint32m8_t vd, vuint32m4_t 
vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1(vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1(vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1(vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1(vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1(vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, + size_t vl); +vuint32m1_t __riscv_vaeskf2(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, + size_t vl); +vuint32m2_t __riscv_vaeskf2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, + size_t vl); +vuint32m4_t __riscv_vaeskf2(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, + size_t vl); +vuint32m8_t __riscv_vaeskf2(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, + size_t vl); +---- + +[[overloaded-]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +---- + +=== Zvknh - NIST 
Suite: Vector SHA-2 Secure Hash + +[[overloaded-]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vsha2ms(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vsha2ms(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vsha2ms(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint64m1_t __riscv_vsha2ms(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vsha2ms(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vsha2ms(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vsha2ms(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +---- + +[[overloaded-]] +==== Vector SHA-2 two rounds of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vsha2ch(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vsha2ch(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vsha2ch(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint64m1_t __riscv_vsha2ch(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vsha2ch(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vsha2ch(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vsha2ch(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint32mf2_t __riscv_vsha2cl(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vsha2cl(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vsha2cl(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vsha2cl(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint64m1_t __riscv_vsha2cl(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vsha2cl(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vsha2cl(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vsha2cl(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +---- + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[overloaded-]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k(vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k(vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k(vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k(vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k(vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[overloaded-]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); 
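+// Editor's illustration, not generator output: a minimal sketch assuming
+// vuint32m1_t values `state` (current SM4 state) and `rkeys` (round keys),
+// plus an active vector length `vl`, are in scope.
+//   state = __riscv_vsm4r_vv(state, rkeys, vl);  // apply SM4 rounds per element group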
+vuint32m4_t __riscv_vsm4r_vs(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[overloaded-]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +---- + +[[overloaded-]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, + size_t vl); +vuint32m1_t __riscv_vsm3c(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, + size_t vl); +vuint32m2_t __riscv_vsm3c(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, + size_t vl); +vuint32m4_t __riscv_vsm3c(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, + size_t vl); +vuint32m8_t __riscv_vsm3c(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, + size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc new file mode 100644 index 000000000..48d6c78bd --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -0,0 +1,740 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[overloaded-]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn(vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn(vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn(vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn(vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn(vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn(vuint8m4_t vs2, 
uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn(vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn(vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn(vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn(vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn(vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn(vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn(vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn(vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn(vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn(vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn(vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn(vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn(vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn(vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn(vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn(vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vandn(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, + size_t vl); +vuint8mf4_t __riscv_vandn(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vandn(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, + size_t vl); +vuint8mf2_t __riscv_vandn(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vandn(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, + size_t vl); +vuint8m1_t __riscv_vandn(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vandn(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vandn(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vandn(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vandn(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t 
__riscv_vandn(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vandn(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, + size_t vl); +vuint16mf2_t __riscv_vandn(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vandn(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, + size_t vl); +vuint16m1_t __riscv_vandn(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vandn(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, + size_t vl); +vuint16m2_t __riscv_vandn(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vandn(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, + size_t vl); +vuint16m4_t __riscv_vandn(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vandn(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, + size_t vl); +vuint16m8_t __riscv_vandn(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vandn(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, + size_t vl); +vuint32mf2_t __riscv_vandn(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vandn(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, + size_t vl); +vuint32m1_t __riscv_vandn(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vandn(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, + size_t vl); +vuint32m2_t __riscv_vandn(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vandn(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, + size_t vl); +vuint32m4_t __riscv_vandn(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vandn(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, + size_t vl); +vuint32m8_t __riscv_vandn(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vandn(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, + size_t vl); +vuint64m1_t __riscv_vandn(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vandn(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vandn(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vandn(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vandn(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vandn(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vandn(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vandn(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev(vuint32m1_t vs2, size_t vl); +vuint32m2_t 
__riscv_vbrev(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev(vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8(vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8(vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev(vbool16_t vm, 
vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev(vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8(vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t 
__riscv_vrev8(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8(vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz(vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz(vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz(vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t 
__riscv_vclz(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz(vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz(vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop(vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop(vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop(vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop(vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop(vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop(vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop(vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop(vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop(vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop(vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop(vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop(vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop(vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop(vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop(vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop(vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop(vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop(vuint64m1_t vs2, size_t vl); 
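// A minimal usage sketch for the element-wise popcount listed here
// (illustration only: the strip-mined loop and the src/dst/n names are
// assumptions, and a Zvbb-enabled toolchain is assumed):
//   size_t vl;
//   for (size_t i = 0; i < n; i += vl) {
//     vl = __riscv_vsetvl_e32m1(n - i);
//     vuint32m1_t v = __riscv_vle32_v_u32m1(src + i, vl);
//     __riscv_vse32_v_u32m1(dst + i, __riscv_vcpop(v, vl), vl);
//   }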
+vuint64m2_t __riscv_vcpop(vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop(vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop(vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop(vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop(vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop(vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop(vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop(vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop(vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop(vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop(vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop(vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop(vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop(vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop(vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop(vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop(vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop(vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop(vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop(vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop(vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop(vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol(vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol(vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol(vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol(vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol(vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol(vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol(vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol(vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol(vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol(vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol(vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol(vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); 
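// A hedged one-liner for the vector-scalar rotate form (the ChaCha-style
// context and the names v/vl are assumptions, not part of the generated
// listing):
//   vuint32m1_t r = __riscv_vrol(v, (size_t)16, vl);
// rotates every 32-bit element of v left by 16; __riscv_vror is the
// right-rotate counterpart with the same overload shapes.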
+vuint16m8_t __riscv_vrol(vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol(vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol(vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol(vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol(vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol(vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol(vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol(vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol(vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror(vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror(vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror(vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror(vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror(vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror(vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror(vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror(vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror(vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror(vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror(vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror(vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror(vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror(vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror(vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t 
__riscv_vror(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror(vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror(vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror(vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror(vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror(vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror(vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vrol(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vrol(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vrol(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vrol(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vrol(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vrol(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vrol(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vrol(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vrol(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vrol(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vrol(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vrol(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vrol(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vrol(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vrol(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t 
__riscv_vrol(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vrol(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vrol(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vrol(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vrol(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vrol(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vrol(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vror(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vror(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vror(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vror(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vror(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vror(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vror(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vror(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vror(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vror(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vror(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vror(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vror(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vror(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror(vbool16_t vm, 
vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vror(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vror(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vror(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vror(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vror(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vror(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vror(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll(vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll(vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll(vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll(vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll(vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll(vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll(vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll(vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll(vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll(vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll(vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll(vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll(vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll(vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll(vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint16mf4_t __riscv_vwsll(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl); 
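// Note on the masked widening shift: vwsll zero-extends each element to
// 2*SEW before shifting, so no bits are lost for shift amounts up to SEW.
// A hedged sketch (vm, v8, and vl are assumed names):
//   vuint16mf4_t w = __riscv_vwsll(vm, v8, (size_t)8, vl);
// Elements whose vm bit is clear follow the default (TAMA) policy of these
// unsuffixed overloads.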
+vuint16mf2_t __riscv_vwsll(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint16mf2_t __riscv_vwsll(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vwsll(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint16m1_t __riscv_vwsll(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint16m2_t __riscv_vwsll(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint16m4_t __riscv_vwsll(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint16m8_t __riscv_vwsll(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint32mf2_t __riscv_vwsll(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vwsll(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vwsll(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vwsll(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vwsll(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vwsll(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vwsll(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint64m1_t __riscv_vwsll(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vwsll(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vwsll(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vwsll(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vwsll(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc new file mode 100644 index 000000000..46e90836e --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc @@ -0,0 +1,58 @@ + +=== Zvbc - Vector Carryless Multiplication + +[[overloaded-]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul(vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul(vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul(vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul(vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh(vuint64m1_t vs2, 
vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh(vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh(vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh(vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh(vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmul(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmul(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmul(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmul(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmul(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmul(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmul(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl); +vuint64m1_t __riscv_vclmulh(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmulh(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmulh(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmulh(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmulh(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmulh(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmulh(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmulh(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc new file mode 100644 index 000000000..b355c3332 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc @@ -0,0 +1,24 @@ + +=== Zvkg - Vector GCM/GMAC + +[[overloaded-]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vghsh(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vghsh(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vghsh(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vghsh(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32mf2_t __riscv_vgmul(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc new file mode 100644 index 000000000..11cdeb958 --- 
/dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc @@ -0,0 +1,135 @@ + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[overloaded-]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); 
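// The .vv form pairs each 128-bit element group of vd (the AES state) with
// the corresponding group of vs2 (the round key), while the .vs form applies
// element group 0 of vs2 to every group of vd, hence one .vs overload per
// destination LMUL. A hedged single-round sketch (state/rk are assumed
// names):
//   state = __riscv_vaesdf_vs(state, rk, vl);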
+vuint32m4_t __riscv_vaesdf_vs(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1(vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1(vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1(vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1(vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1(vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, + size_t vl); +vuint32m1_t __riscv_vaeskf2(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, + size_t vl); +vuint32m2_t __riscv_vaeskf2(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, + size_t vl); +vuint32m4_t __riscv_vaeskf2(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, + size_t vl); +vuint32m8_t __riscv_vaeskf2(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, + size_t vl); +---- + +[[overloaded-]] +==== Vector AES round zero + +[,c] 
+---- +vuint32mf2_t __riscv_vaesz(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc new file mode 100644 index 000000000..f88389f32 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc @@ -0,0 +1,70 @@ + +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash + +[[overloaded-]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vsha2ms(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vsha2ms(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vsha2ms(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint64m1_t __riscv_vsha2ms(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vsha2ms(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vsha2ms(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vsha2ms(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +---- + +[[overloaded-]] +==== Vector SHA-2 two rounds of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vsha2ch(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vsha2ch(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vsha2ch(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint64m1_t __riscv_vsha2ch(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vsha2ch(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vsha2ch(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vsha2ch(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint32mf2_t __riscv_vsha2cl(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t 
__riscv_vsha2cl(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vsha2cl(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vsha2cl(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint64m1_t __riscv_vsha2cl(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vsha2cl(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vsha2cl(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vsha2cl(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc new file mode 100644 index 000000000..4d67aeef2 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc @@ -0,0 +1,40 @@ + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[overloaded-]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k(vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k(vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k(vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k(vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k(vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[overloaded-]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc new file mode 100644 index 000000000..e576b0ec4 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc @@ -0,0 +1,31 @@ + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[overloaded-]] +==== Vector SM3 Message Expansion + +[,c] +---- 
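// A hedged call sketch for the message expansion below (operand names are
// assumptions): given the previous sixteen SM3 message words split across
// two vector operands,
//   vuint32m1_t w = __riscv_vsm3me(wcur, wprev, vl);
// produces the next eight message words per element group.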
+vuint32mf2_t __riscv_vsm3me(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +---- + +[[overloaded-]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, + size_t vl); +vuint32m1_t __riscv_vsm3c(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, + size_t vl); +vuint32m2_t __riscv_vsm3c(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, + size_t vl); +vuint32m4_t __riscv_vsm3c(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, + size_t vl); +vuint32m8_t __riscv_vsm3c(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, + size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c new file mode 100644 index 000000000..8f744ae0e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c @@ -0,0 +1,97 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + 
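// These generated policy tests are compile-only checks that the _tu
// (tail-undisturbed) overload accepts the documented signature; vd serves
// both as the tail source and the destination operand.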
return __riscv_vaesdf_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c new file mode 100644 index 000000000..04edc8d6c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c @@ -0,0 +1,97 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t 
test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c new file mode 100644 index 000000000..c9545d7be --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c @@ -0,0 +1,97 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c new file mode 100644 index 000000000..d3395b8f4 --- /dev/null +++ 
b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c @@ -0,0 +1,97 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c new file mode 100644 index 000000000..2836c4176 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c @@ -0,0 +1,27 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf1_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf1_vi_u32m1_tu(vd, vs2, 0, 
vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf1_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf1_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf1_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c new file mode 100644 index 000000000..d631d1095 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c @@ -0,0 +1,27 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c new file mode 100644 index 000000000..d54e3f162 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c @@ -0,0 +1,72 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + 
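// In the _vs_u32m2_u32m8 suffix, u32m2 names the vs2 key-group type and
// u32m8 the destination type; vaesz XORs element group 0 of vs2 into every
// group of vd (AES round-zero key addition).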
return __riscv_vaesz_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c new file mode 100644 index 000000000..96b7d173c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c @@ -0,0 +1,939 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf8_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf4_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf2_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m1_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m1_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m2_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m2_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m4_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m4_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m8_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t
vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return 
__riscv_vandn_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t 
test_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return 
__riscv_vandn_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return 
__riscv_vandn_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t 
test_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_mu(vm, vd, vs2, rs1, vl); 
+} + +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t 
test_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c new file mode 100644 index 000000000..05f10027f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c @@ -0,0 +1,423 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t
test_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return 
__riscv_vbrev_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + 
vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t 
test_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vbrev_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c new file mode 100644 index 000000000..fcd9aacb4 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c @@ -0,0 +1,423 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t
test_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, 
size_t vl) { + return __riscv_vbrev8_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tumu(vm, vd, vs2, vl); +} + 
+vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t 
vl) { + return __riscv_vbrev8_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c new file mode 100644 index 000000000..3366b4eae --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return
__riscv_vclmul_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t 
test_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c new file mode 100644 index 000000000..f3ae63f11 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c @@ -0,0 +1,182 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return
__riscv_vclmulh_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c new file mode 100644 index 000000000..8d3150e97 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c @@ -0,0 +1,423 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tu(vd, vs2, 
vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vclz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vclz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vclz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m4_tum(vm, vd, vs2, vl); +} 
+ +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t 
test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t 
test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c new file mode 100644 index 000000000..167388be7 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c @@ -0,0 +1,423 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t 
vs2, + size_t vl) { + return __riscv_vcpop_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t 
test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return 
__riscv_vcpop_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t 
vl) { + return __riscv_vcpop_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vcpop_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c new file mode 100644 index 000000000..ca565cdf0 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c @@ -0,0 +1,423 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vctz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vctz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + 
return __riscv_vctz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m2_tum(vm, vd, vs2, vl); 
+} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m4_tumu(vm, vd, vs2, 
vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t 
vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c new file mode 100644 index 000000000..a93cc8fe8 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c @@ -0,0 +1,27 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c new file mode 100644 index 000000000..5f176ce1d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c @@ -0,0 +1,23 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vgmul_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c new file mode 100644 index 000000000..71dbc1d32 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c @@ -0,0 +1,423 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t vd, 
vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return 
__riscv_vrev8_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + 
vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t 
test_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vrev8_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c new file mode 100644 index 000000000..d48fa7214 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c @@ -0,0 +1,915 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf8_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf4_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf2_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m1_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m1_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t 
test_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m2_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m2_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m4_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m4_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m8_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tu(vd, vs2, 
vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return 
__riscv_vrol_vv_u8m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m1_tum(vm, vd, vs2, 
vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); +} + 
+vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t 
test_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); +} + 
+vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + 
return __riscv_vrol_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t 
test_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c new file mode 100644 index 000000000..68acedb20 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c @@ -0,0 +1,915 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf8_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf4_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf2_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m1_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m1_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m2_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m2_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m4_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m4_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m8_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tu(vd, 
vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + 
return __riscv_vror_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t 
vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t 
vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return 
__riscv_vror_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, 
vuint32m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_mu(vm, vd, 
vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t vm, 
vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c new file mode 100644 index 000000000..e9c0316d1 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c @@ -0,0 +1,47 @@ +#include +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, 
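/* vsha2ch ("compression, high") and vsha2cl ("compression, low") each
 * perform two rounds of SHA-2 compression, and vsha2ms computes four words
 * of the message schedule. These Zvknha/Zvknhb instructions operate on
 * four-element groups (EGW = 4*SEW), so vl must be a multiple of 4; SEW=32
 * covers SHA-256, and SEW=64 (Zvknhb only) covers SHA-512. Because vd
 * already serves as a source operand (the running hash state), only the
 * `tu` policy variant is generated for this family.
 */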
vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c new file mode 100644 index 000000000..05ab17663 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c @@ -0,0 +1,47 @@ +#include +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c new file mode 100644 index 000000000..df4ef75fc --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c @@ -0,0 +1,47 @@ +#include +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t 
test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c new file mode 100644 index 000000000..b8642b667 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c @@ -0,0 +1,23 @@ +#include +#include + +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm3c_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c new file mode 100644 index 000000000..9b9615adb --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c @@ -0,0 +1,27 @@ +#include +#include + +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c new file mode 100644 index 000000000..ae36c36e4 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c @@ -0,0 +1,23 @@ +#include +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4k_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, 
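/* vsm3c.vi and vsm4k.vi encode their round information as an unsigned
 * immediate (the SM3 round index and the SM4 round-group index,
 * respectively), so the intrinsics require an integer constant expression
 * for that argument; the generated tests simply pass the literal 0. A
 * sketch with a different round group (assuming vd, vs2 and vl are in
 * scope):
 *
 *   vd = __riscv_vsm4k_vi_u32m1_tu(vd, vs2, 2, vl);  // round group 2
 */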
size_t vl) { + return __riscv_vsm4k_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c new file mode 100644 index 000000000..9dcdb8818 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c @@ -0,0 +1,93 @@ +#include +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c new file mode 100644 index 000000000..b93b19f8d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c @@ -0,0 +1,639 @@ +#include +#include + +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, 
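/* In the vsm4r tests above, the `vv` form applies the SM4 round function to
 * each element group of vd using the round keys in the corresponding group
 * of vs2, while the `vs` form reuses element group 0 of vs2 for every group
 * of vd. That is why the `_vs_` intrinsics pair one vs2 type with every
 * destination LMUL; e.g. __riscv_vsm4r_vs_u32mf2_u32m8_tu takes a
 * vuint32mf2_t key group but updates a vuint32m8_t state.
 */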
vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return 
__riscv_vwsll_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + 
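/* vwsll zero-extends each element of vs2 to twice its width before shifting
 * it left by vs1 (or rs1), so no shifted-out bits are lost in the widening
 * destination. A minimal sketch, assuming a hypothetical uint8_t buffer
 * `src` of length `n`:
 *
 *   size_t vl = __riscv_vsetvl_e8m1(n);
 *   vuint8m1_t  x = __riscv_vle8_v_u8m1(src, vl);
 *   vuint16m2_t y = __riscv_vwsll_vx_u16m2(x, 4, vl);  // (uint16_t)x << 4
 */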
vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t 
test_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return 
__riscv_vwsll_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + 
return __riscv_vwsll_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.adoc new file mode 100644 index 000000000..1aa6eba69 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.adoc @@ -0,0 +1,3245 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[policy-variant-]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t 
__riscv_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t 
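/* vandn is a bitwise and-not: one source is complemented before the AND.
 * Assuming it mirrors scalar Zbb `andn` in complementing the second source
 * (this operand order is an assumption; confirm it against the Zvbb
 * specification before relying on it), a bit-clearing sketch would be:
 *
 *   // y = x & ~m, with tail elements taken from vd
 *   vuint8m1_t y = __riscv_vandn_vv_u8m1_tu(vd, x, m, vl);
 */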
__riscv_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t 
vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, + size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, + size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + 
vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tumu(vbool32_t vm, 
vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); 
+vuint16m8_t __riscv_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + 
size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tu(vuint16m8_t vd, 
vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tum(vbool2_t vm, 
vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); 
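+// Editor's note -- usage sketch, not part of the generated listing. With
+// the _tum suffix, masked-off and tail elements keep their old values from
+// vd; active elements receive the byte-reversed source. Assumes
+// <riscv_vector.h> and the Zvbb extension are available.
+static inline vuint32m1_t swap_bytes_masked(vbool32_t vm, vuint32m1_t vd,
+                                            vuint32m1_t vs2, size_t vl) {
+  // Byte-swap each active 32-bit element, e.g. for endianness conversion.
+  return __riscv_vrev8_v_u32m1_tum(vm, vd, vs2, vl);
+}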
+vuint64m2_t __riscv_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t 
__riscv_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t 
__riscv_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t 
__riscv_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tu(vuint16mf4_t vd, 
vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tum(vbool2_t vm, 
vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tum(vbool32_t vm, 
vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t 
__riscv_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t 
__riscv_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); 
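+// Editor's note -- usage sketch, not part of the generated listing. The
+// _tu suffix leaves elements past vl untouched in vd, while each of the
+// first vl elements receives its per-element population count. Assumes
+// <riscv_vector.h> and the Zvbb extension are available.
+static inline vuint64m1_t popcount_prefix_tu(vuint64m1_t vd, vuint64m1_t vs2,
+                                             size_t vl) {
+  // Count set bits independently within each of the first vl elements.
+  return __riscv_vcpop_v_u64m1_tu(vd, vs2, vl);
+}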
+vuint64m2_t __riscv_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + 
vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t 
__riscv_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t 
vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + 
vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + 
vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t 
__riscv_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); 
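+// Note on the policy suffixes used throughout this listing: the `_tum`
+// intrinsics are the masked, tail-undisturbed forms. Elements past `vl`
+// keep the value of the passthrough operand `vd`, while masked-off
+// elements follow the mask-agnostic policy. A usage sketch follows this
+// listing.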
+vuint32m8_t __riscv_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + 
vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t 
__riscv_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t 
vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t 
rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t 
__riscv_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl); +---- + 
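+As a usage sketch for the rotate intrinsics above: the strip-mined loop
+below rotates a buffer with the `_tu` variant, which preserves tail
+elements of the destination operand. It assumes a toolchain whose
+`<riscv_vector.h>` exposes these intrinsics with the Zvbb extension
+enabled; the function and buffer names are illustrative only.
+
+[,c]
+----
+#include <riscv_vector.h>
+#include <stddef.h>
+#include <stdint.h>
+
+// Rotate each 32-bit word of src right by 8 bits into dest, n words total.
+// The _tu variant keeps tail elements (indices >= vl) of vd unchanged
+// rather than leaving them tail-agnostic.
+void ror8_u32(uint32_t *dest, const uint32_t *src, size_t n) {
+  while (n > 0) {
+    size_t vl = __riscv_vsetvl_e32m1(n);
+    vuint32m1_t vd = __riscv_vle32_v_u32m1(dest, vl);
+    vuint32m1_t vs2 = __riscv_vle32_v_u32m1(src, vl);
+    vd = __riscv_vror_vx_u32m1_tu(vd, vs2, 8, vl);
+    __riscv_vse32_v_u32m1(dest, vd, vl);
+    n -= vl;
+    src += vl;
+    dest += vl;
+  }
+}
+----
+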
+[[policy-variant-]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint16mf2_t 
__riscv_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint16mf2_t 
__riscv_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint16mf2_t 
__riscv_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +---- + +=== Zvbc - Vector Carryless Multiplication + +[[policy-variant-]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t 
__riscv_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t 
__riscv_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + 
vuint64m8_t vs2, uint64_t rs1, + size_t vl); +---- + +=== Zvkg - Vector GCM/GMAC + +[[policy-variant-]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +---- + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[policy-variant-]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t 
__riscv_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +---- + +[[policy-variant-]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + 
size_t vl); +vuint32m1_t __riscv_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +---- + +[[policy-variant-]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t uimm, size_t vl); +---- + +[[policy-variant-]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t 
+
+=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash
+
+[[policy-variant-]]
+==== Vector SHA-2 message schedule
+
+[,c]
+----
+vuint32mf2_t __riscv_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                          vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                        vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                        vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                        vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                        vuint32m8_t vs1, size_t vl);
+vuint64m1_t __riscv_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
+                                        vuint64m1_t vs1, size_t vl);
+vuint64m2_t __riscv_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2,
+                                        vuint64m2_t vs1, size_t vl);
+vuint64m4_t __riscv_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2,
+                                        vuint64m4_t vs1, size_t vl);
+vuint64m8_t __riscv_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2,
+                                        vuint64m8_t vs1, size_t vl);
+----
+
+[[policy-variant-]]
+==== Vector SHA-2 two rounds of compression
+
+[,c]
+----
+vuint32mf2_t __riscv_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                          vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                        vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                        vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                        vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                        vuint32m8_t vs1, size_t vl);
+vuint64m1_t __riscv_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
+                                        vuint64m1_t vs1, size_t vl);
+vuint64m2_t __riscv_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2,
+                                        vuint64m2_t vs1, size_t vl);
+vuint64m4_t __riscv_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2,
+                                        vuint64m4_t vs1, size_t vl);
+vuint64m8_t __riscv_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2,
+                                        vuint64m8_t vs1, size_t vl);
+vuint32mf2_t __riscv_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                          vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                        vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                        vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                        vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                        vuint32m8_t vs1, size_t vl);
+vuint64m1_t __riscv_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
+                                        vuint64m1_t vs1, size_t vl);
+vuint64m2_t __riscv_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2,
+                                        vuint64m2_t vs1, size_t vl);
+vuint64m4_t __riscv_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2,
+                                        vuint64m4_t vs1, size_t vl);
+vuint64m8_t __riscv_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2,
+                                        vuint64m8_t vs1, size_t vl);
+----
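+
+A schematic of how these intrinsics pair up in a SHA-256 inner loop, assuming
+`VLEN >= 128` so a `u32m1` group holds four 32-bit words. The packing of the
+working variables `{a,b,e,f}`/`{c,d,g,h}` and of the message-schedule operands
+follows the Zvknh specification; `abef`, `cdgh`, `w0`, `w1mix` (the shuffled
+W operand that real code prepares with slides and merges), `w3`, and `k` are
+illustrative names only.
+
+[,c]
+----
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void sha256_quad_round(uint32_t abef[4], uint32_t cdgh[4], uint32_t w0[4],
+                       const uint32_t w1mix[4], const uint32_t w3[4],
+                       const uint32_t k[4]) {
+  size_t vl = __riscv_vsetvl_e32m1(4);
+  vuint32m1_t vabef = __riscv_vle32_v_u32m1(abef, vl);
+  vuint32m1_t vcdgh = __riscv_vle32_v_u32m1(cdgh, vl);
+  vuint32m1_t vw0 = __riscv_vle32_v_u32m1(w0, vl);
+  // W[i] + K[i] feeds both compression steps.
+  vuint32m1_t kw = __riscv_vadd_vv_u32m1(vw0, __riscv_vle32_v_u32m1(k, vl), vl);
+  vcdgh = __riscv_vsha2cl_vv_u32m1_tu(vcdgh, vabef, kw, vl); // two low rounds
+  vabef = __riscv_vsha2ch_vv_u32m1_tu(vabef, vcdgh, kw, vl); // two high rounds
+  // Extend the message schedule: produce W[i+16..i+19] from earlier words.
+  vw0 = __riscv_vsha2ms_vv_u32m1_tu(vw0, __riscv_vle32_v_u32m1(w1mix, vl),
+                                    __riscv_vle32_v_u32m1(w3, vl), vl);
+  __riscv_vse32_v_u32m1(abef, vabef, vl);
+  __riscv_vse32_v_u32m1(cdgh, vcdgh, vl);
+  __riscv_vse32_v_u32m1(w0, vw0, vl);
+}
+----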
+
+=== Zvksed - ShangMi Suite: SM4 Block Cipher
+
+[[policy-variant-]]
+==== Vector SM4 KeyExpansion
+
+[,c]
+----
+vuint32mf2_t __riscv_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                        size_t uimm, size_t vl);
+vuint32m1_t __riscv_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                      size_t uimm, size_t vl);
+vuint32m2_t __riscv_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                      size_t uimm, size_t vl);
+vuint32m4_t __riscv_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                      size_t uimm, size_t vl);
+vuint32m8_t __riscv_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                      size_t uimm, size_t vl);
+----
+
+[[policy-variant-]]
+==== Vector SM4 Rounds
+
+[,c]
+----
+vuint32mf2_t __riscv_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                        size_t vl);
+vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd,
+                                               vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2,
+                                             size_t vl);
+vuint32m2_t __riscv_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2,
+                                             size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2,
+                                             size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2,
+                                             size_t vl);
+vuint32m1_t __riscv_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                      size_t vl);
+vuint32m1_t __riscv_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                            size_t vl);
+vuint32m2_t __riscv_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2,
+                                            size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2,
+                                            size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2,
+                                            size_t vl);
+vuint32m2_t __riscv_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                      size_t vl);
+vuint32m2_t __riscv_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                            size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2,
+                                            size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2,
+                                            size_t vl);
+vuint32m4_t __riscv_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                      size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                            size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2,
+                                            size_t vl);
+vuint32m8_t __riscv_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                      size_t vl);
+----
+
+=== Zvksh - ShangMi Suite: SM3 Secure Hash
+
+[[policy-variant-]]
+==== Vector SM3 Message Expansion
+
+[,c]
+----
+vuint32mf2_t __riscv_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                         vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                       vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                       vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                       vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                       vuint32m8_t vs1, size_t vl);
+----
+
+[[policy-variant-]]
+==== Vector SM3 Compression
+
+[,c]
+----
+vuint32mf2_t __riscv_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                        size_t uimm, size_t vl);
+vuint32m1_t __riscv_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                      size_t uimm, size_t vl);
+vuint32m2_t __riscv_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                      size_t uimm, size_t vl);
+vuint32m4_t __riscv_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                      size_t uimm, size_t vl);
+vuint32m8_t __riscv_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                      size_t uimm, size_t vl);
+----
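+
+A minimal SM4 sketch under the same `VLEN >= 128` assumption: expand the
+first two groups of round keys with `__riscv_vsm4k_vi_u32m1_tu` (the round
+group index is an immediate, 0..7) and run eight rounds on one 128-bit block
+with `__riscv_vsm4r_vv_u32m1_tu`. `mk` stands for the already FK-whitened
+master key per the Zvksed specification; all names are illustrative.
+
+[,c]
+----
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void sm4_sketch(const uint32_t mk[4], uint32_t block[4]) {
+  size_t vl = __riscv_vsetvl_e32m1(4);
+  vuint32m1_t rk = __riscv_vle32_v_u32m1(mk, vl);
+  vuint32m1_t rk0 = __riscv_vsm4k_vi_u32m1_tu(rk, rk, 0, vl);   // rk[0..3]
+  vuint32m1_t rk1 = __riscv_vsm4k_vi_u32m1_tu(rk0, rk0, 1, vl); // rk[4..7]
+  vuint32m1_t st = __riscv_vle32_v_u32m1(block, vl);
+  st = __riscv_vsm4r_vv_u32m1_tu(st, rk0, vl); // rounds 0..3
+  st = __riscv_vsm4r_vv_u32m1_tu(st, rk1, vl); // rounds 4..7
+  __riscv_vse32_v_u32m1(block, st, vl);
+}
+----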
diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc
new file mode 100644
index 000000000..1f4e9675c
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc
@@ -0,0 +1,2628 @@
+
+=== Zvbb - Vector Bit-manipulation used in Cryptography
+
+[[policy-variant-]]
+==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not
+
+[,c]
+----
+vuint8mf8_t __riscv_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2,
+                                      vuint8mf8_t vs1, size_t vl);
+vuint8mf8_t __riscv_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2,
+                                      uint8_t rs1, size_t vl);
+vuint8mf4_t __riscv_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2,
+                                      vuint8mf4_t vs1, size_t vl);
+vuint8mf4_t __riscv_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2,
+                                      uint8_t rs1, size_t vl);
+vuint8mf2_t __riscv_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2,
+                                      vuint8mf2_t vs1, size_t vl);
+vuint8mf2_t __riscv_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2,
+                                      uint8_t rs1, size_t vl);
+vuint8m1_t __riscv_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2,
+                                    vuint8m1_t vs1, size_t vl);
+vuint8m1_t __riscv_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1,
+                                    size_t vl);
+vuint8m2_t __riscv_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2,
+                                    vuint8m2_t vs1, size_t vl);
+vuint8m2_t __riscv_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1,
+                                    size_t vl);
+vuint8m4_t __riscv_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2,
+                                    vuint8m4_t vs1, size_t vl);
+vuint8m4_t __riscv_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1,
+                                    size_t vl);
+vuint8m8_t __riscv_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2,
+                                    vuint8m8_t vs1, size_t vl);
+vuint8m8_t __riscv_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1,
+                                    size_t vl);
+vuint16mf4_t __riscv_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                                        vuint16mf4_t vs1, size_t vl);
+vuint16mf4_t __riscv_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                                        uint16_t rs1, size_t vl);
+vuint16mf2_t __riscv_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                                        vuint16mf2_t vs1, size_t vl);
+vuint16mf2_t __riscv_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                                        uint16_t rs1, size_t vl);
+vuint16m1_t __riscv_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
+                                      vuint16m1_t vs1, size_t vl);
+vuint16m1_t __riscv_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
+                                      uint16_t rs1, size_t vl);
+vuint16m2_t __riscv_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2,
+                                      vuint16m2_t vs1, size_t vl);
+vuint16m2_t __riscv_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2,
+                                      uint16_t rs1, size_t vl);
+vuint16m4_t __riscv_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2,
+                                      vuint16m4_t vs1, size_t vl);
+vuint16m4_t __riscv_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2,
+                                      uint16_t rs1, size_t vl);
+vuint16m8_t __riscv_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2,
+                                      vuint16m8_t vs1, size_t vl);
+vuint16m8_t __riscv_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2,
+                                      uint16_t rs1, size_t vl);
+vuint32mf2_t __riscv_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                        vuint32mf2_t vs1, size_t vl);
+vuint32mf2_t __riscv_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                        uint32_t rs1, size_t vl);
+vuint32m1_t __riscv_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                      vuint32m1_t vs1, size_t vl);
+vuint32m1_t __riscv_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                      uint32_t rs1, size_t vl);
+vuint32m2_t
__riscv_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl); +vuint16m1_t 
__riscv_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, + size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, + 
size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, + size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, + size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, + size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, + size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, + size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, + size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, + size_t 
vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, + size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t 
__riscv_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t 
__riscv_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t 
vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + 
vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t 
__riscv_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vuint8m1_t vs2, size_t vl); +vuint8m2_t 
__riscv_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, + vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t 
__riscv_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, 
size_t vl);
+vuint16m1_t __riscv_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                      vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+                                      vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+                                      vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+                                      vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+                                      vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd,
+                                      vuint64m8_t vs2, size_t vl);
+vuint8mf8_t __riscv_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                   size_t vl);
+vuint8m2_t __riscv_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                   size_t vl);
+vuint8m4_t __riscv_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                   size_t vl);
+vuint8m8_t __riscv_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                   size_t vl);
+vuint16mf4_t __riscv_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf4_t vs2, size_t vl);
+vuint16mf2_t __riscv_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16mf2_t vs2, size_t vl);
+vuint16m1_t __riscv_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+                                     vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                     vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+                                     vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+                                     vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                     vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                     vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd,
+                                     vuint64m8_t vs2, size_t vl);
+----
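+
+As a concrete use of the reverse intrinsics, the sketch below byte-swaps a
+buffer of 64-bit words with `__riscv_vrev8_v_u64m4_tu` in a strip-mined loop;
+passing the previous destination contents as `vd` makes the tail-undisturbed
+policy visible. `bswap64_tu` and its parameters are illustrative names.
+
+[,c]
+----
+#include <riscv_vector.h>
+#include <stdint.h>
+
+void bswap64_tu(uint64_t *dst, const uint64_t *src, size_t n) {
+  for (size_t done = 0; done < n;) {
+    size_t vl = __riscv_vsetvl_e64m4(n - done);
+    // vd supplies the tail elements that _tu leaves undisturbed.
+    vuint64m4_t old = __riscv_vle64_v_u64m4(dst + done, vl);
+    vuint64m4_t v = __riscv_vle64_v_u64m4(src + done, vl);
+    vuint64m4_t r = __riscv_vrev8_v_u64m4_tu(old, v, vl);
+    __riscv_vse64_v_u64m4(dst + done, r, vl);
+    done += vl;
+  }
+}
+----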
+
+[[policy-variant-]]
+==== Vector Basic Bit-manipulation - Count Bits
+
+[,c]
+----
+vuint8mf8_t __riscv_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl);
+vuint8m2_t __riscv_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl);
+vuint8m4_t __riscv_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl);
+vuint8m8_t __riscv_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                                      size_t vl);
+vuint16mf2_t __riscv_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                                      size_t vl);
+vuint16m1_t __riscv_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                      size_t vl);
+vuint32m1_t __riscv_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t __riscv_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl);
+vuint8mf8_t __riscv_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl);
+vuint8m2_t __riscv_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl);
+vuint8m4_t __riscv_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl);
+vuint8m8_t __riscv_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                                      size_t vl);
+vuint16mf2_t __riscv_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                                      size_t vl);
+vuint16m1_t __riscv_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                      size_t vl);
+vuint32m1_t __riscv_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
+vuint64m4_t __riscv_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl);
+vuint64m8_t
__riscv_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t 
__riscv_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, 
vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_mu(vbool64_t vm, 
vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t 
__riscv_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); 
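+// Editor's illustration (not part of the generated listing): a minimal
+// sketch of the tu policy, assuming <riscv_vector.h> and the Zvbb extension.
+// The helper name below is hypothetical; the intrinsic itself is declared
+// above. With the tu suffix, tail elements (indices >= vl) keep their old
+// values from vd instead of being overwritten.
+static inline vuint8m1_t popcount_keep_tail(vuint8m1_t vd, vuint8m1_t vs2,
+                                            size_t vl) {
+  return __riscv_vcpop_v_u8m1_tu(vd, vs2, vl); // per-element popcount of vs2
+}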
+vuint8m2_t __riscv_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_mu(vbool4_t vm, 
vuint32m8_t vd, + vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t 
__riscv_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); 
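+// Editor's illustration (not part of the generated listing): a minimal
+// sketch assuming <riscv_vector.h> and Zvbb/Zvkb. The rotate amount rs1 is
+// taken modulo SEW, so rotating 32-bit elements by 16 swaps their halves.
+// The helper name is hypothetical; the intrinsic is declared above.
+static inline vuint32m1_t rotl16_keep_tail(vuint32m1_t vd, vuint32m1_t vs2,
+                                           size_t vl) {
+  return __riscv_vrol_vx_u32m1_tu(vd, vs2, 16, vl); // element-wise ROTL by 16
+}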
+vuint16m4_t __riscv_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, 
size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, 
+ vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t 
vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tumu(vbool32_t 
vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t 
vs1, + size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t 
vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + 
vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + 
size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); 
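+// Editor's illustration (not part of the generated listing): a minimal
+// sketch of the mu policy, assuming <riscv_vector.h> and Zvbb/Zvkb. Under
+// mu, only elements whose bit in vm is set are rotated; masked-off elements
+// keep their previous values from vd (mask-undisturbed). The helper name is
+// hypothetical; the intrinsic is declared nearby in this listing.
+static inline vuint64m1_t rotr_masked(vbool64_t vm, vuint64m1_t vd,
+                                      vuint64m1_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u64m1_mu(vm, vd, vs2, rs1, vl);
+}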
+vuint64m2_t __riscv_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                     vuint64m2_t vs2, size_t rs1, size_t vl);
+vuint64m4_t __riscv_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m4_t vs2, vuint64m4_t vs1,
+                                     size_t vl);
+vuint64m4_t __riscv_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m4_t vs2, size_t rs1, size_t vl);
+vuint64m8_t __riscv_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd,
+                                     vuint64m8_t vs2, vuint64m8_t vs1,
+                                     size_t vl);
+vuint64m8_t __riscv_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd,
+                                     vuint64m8_t vs2, size_t rs1, size_t vl);
+----
+
+[[policy-variant-]]
+==== Vector Basic Bit-manipulation - Widening Shift
+
+[,c]
+----
+vuint16mf4_t __riscv_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2,
+                                        vuint8mf8_t vs1, size_t vl);
+vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2,
+                                        size_t rs1, size_t vl);
+vuint16mf2_t __riscv_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2,
+                                        vuint8mf4_t vs1, size_t vl);
+vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2,
+                                        size_t rs1, size_t vl);
+vuint16m1_t __riscv_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2,
+                                      vuint8mf2_t vs1, size_t vl);
+vuint16m1_t __riscv_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2,
+                                      size_t rs1, size_t vl);
+vuint16m2_t __riscv_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2,
+                                      vuint8m1_t vs1, size_t vl);
+vuint16m2_t __riscv_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2,
+                                      size_t rs1, size_t vl);
+vuint16m4_t __riscv_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2,
+                                      vuint8m2_t vs1, size_t vl);
+vuint16m4_t __riscv_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2,
+                                      size_t rs1, size_t vl);
+vuint16m8_t __riscv_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2,
+                                      vuint8m4_t vs1, size_t vl);
+vuint16m8_t __riscv_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2,
+                                      size_t rs1, size_t vl);
+vuint32mf2_t __riscv_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2,
+                                        vuint16mf4_t vs1, size_t vl);
+vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2,
+                                        size_t rs1, size_t vl);
+vuint32m1_t __riscv_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2,
+                                      vuint16mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2,
+                                      size_t rs1, size_t vl);
+vuint32m2_t __riscv_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2,
+                                      vuint16m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2,
+                                      size_t rs1, size_t vl);
+vuint32m4_t __riscv_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2,
+                                      vuint16m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2,
+                                      size_t rs1, size_t vl);
+vuint32m8_t __riscv_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2,
+                                      vuint16m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2,
+                                      size_t rs1, size_t vl);
+vuint64m1_t __riscv_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2,
+                                      vuint32mf2_t vs1, size_t vl);
+vuint64m1_t __riscv_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2,
+                                      size_t rs1, size_t vl);
+vuint64m2_t __riscv_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2,
+                                      vuint32m1_t vs1, size_t vl);
+vuint64m2_t __riscv_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2,
+                                      size_t rs1, size_t vl);
+vuint64m4_t __riscv_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2,
+                                      vuint32m2_t vs1, size_t vl);
+vuint64m4_t __riscv_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2,
+                                      size_t rs1, size_t vl);
+vuint64m8_t __riscv_vwsll_vv_u64m8_tu(vuint64m8_t vd, 
vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + 
size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t 
vs1, + size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint64m8_t 
__riscv_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc new file mode 100644 index 000000000..110d9a175 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc @@ -0,0 +1,188 @@ + +=== Zvbc - Vector Carryless Multiplication + +[[policy-variant-]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t 
__riscv_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t 
__riscv_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc new file mode 100644 index 000000000..f17b3da41 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc @@ -0,0 +1,29 @@ + +=== Zvkg - Vector GCM/GMAC + +[[policy-variant-]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc new file mode 100644 index 000000000..cbed68346 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc @@ -0,0 +1,230 @@ + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[policy-variant-]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t 
__riscv_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +---- + +[[policy-variant-]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + 
size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +---- + +[[policy-variant-]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t uimm, size_t vl); 
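+// A usage sketch, not a generated prototype: deriving AES-128 round key 1
+// from round key 0 with the tail-undisturbed policy. The name `k0` and the
+// round index are illustrative assumptions, not taken from this listing.
+//   vuint32m1_t k1 = __riscv_vaeskf1_vi_u32m1_tu(k0, k0, 1, vl);
+// Passing `k0` as vd means destination elements past `vl` keep its values.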
+vuint32m8_t __riscv_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t uimm, size_t vl); +---- + +[[policy-variant-]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc new file mode 100644 index 000000000..114525658 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc @@ -0,0 +1,70 @@ + +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash + +[[policy-variant-]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector SHA-2 two rounds 
of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc new file mode 100644 index 000000000..11c2d5b61 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc @@ -0,0 +1,64 @@ + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[policy-variant-]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t uimm, size_t vl); +---- + +[[policy-variant-]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m8_t 
__riscv_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc new file mode 100644 index 000000000..bd548f60a --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc @@ -0,0 +1,36 @@ + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[policy-variant-]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t uimm, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c new file mode 100644 index 000000000..095eecc39 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c @@ -0,0 +1,105 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + 
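+// These tests include <riscv_vector.h> for the intrinsic declarations; each
+// one wraps a single generated policy intrinsic so the RUN lines above can
+// check the emitted IR. As a hedged usage sketch (the names `state` and
+// `round_key` are illustrative, not part of this file), the final AES
+// decryption round on one state group looks like:
+//   state = __riscv_vaesdf_vv_u32m1_tu(state, round_key, vl);
+// The `_vs_` forms below instead broadcast element group 0 of vs2 across all
+// element groups of vd, which is why they carry two LMUL suffixes.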
+vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c new file mode 100644 index 000000000..a05299453 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c @@ -0,0 +1,105 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t 
test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c new file mode 100644 index 000000000..a83cb7537 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c @@ -0,0 +1,105 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, 
vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c new file mode 100644 index 000000000..694bdd477 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c @@ -0,0 +1,105 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return 
__riscv_vaesem_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c new file mode 100644 index 000000000..ef48dab61 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf1_vi_u32mf2_tu(vd, vs2, 0, 
vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf1_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf1_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf1_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf1_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c new file mode 100644 index 000000000..8fb8dc865 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c new file mode 100644 index 000000000..dc8a1ae53 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c @@ -0,0 +1,80 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + 
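+// vaesz exists only in a `.vs` form: it XORs the round key held in element
+// group 0 of vs2 into every element group of vd (the AES round-zero
+// AddRoundKey step). The two LMUL suffixes name the vs2 type and the
+// vd/return type in that order, so a sketch such as
+//   vuint32m4_t s = __riscv_vaesz_vs_u32m1_u32m4_tu(s, key_m1, vl);
+// (identifiers illustrative) reads a u32m1 key and updates a u32m4 state.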
+vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c new file mode 100644 index 000000000..99552ff27 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c @@ -0,0 +1,946 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf8_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf4_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf2_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m1_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m1_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m2_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m2_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m4_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t 
test_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m4_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m8_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t 
vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + 
return __riscv_vandn_vv_u8m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + 
vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t 
test_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tumu(vm, vd, 
vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t 
vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return 
__riscv_vandn_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return 
__riscv_vandn_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c new file mode 100644 index 000000000..f83e035af --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c @@ -0,0 +1,430 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tu(vd,
vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t 
test_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return 
__riscv_vbrev_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vbrev_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vbrev_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vbrev_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return 
__riscv_vbrev_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c new file mode 100644 index 000000000..0a168be50 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c @@ -0,0 +1,430 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t
test_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return 
__riscv_vbrev8_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tumu(vm, vd, vs2, vl); +} + 
+vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev8_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c 
b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c new file mode 100644 index 000000000..33bebbec7 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2,
uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c new file mode 100644 index 000000000..414bb847b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return 
__riscv_vclmulh_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + 
return __riscv_vclmulh_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c new file mode 100644 index 000000000..dd89af321 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c @@ -0,0 +1,430 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vclz_v_u16mf4_tu(vd, vs2, vl); +} + 
+vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vclz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vclz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t 
test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t 
test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t 
vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c new file mode 100644 index 000000000..e152d352b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c @@ -0,0 +1,430 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tu(vd, vs2, vl); +} + 
+vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t 
test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return 
__riscv_vcpop_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + 
return __riscv_vcpop_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vcpop_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vcpop_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vcpop_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c new file mode 100644 index 000000000..27ee5ed9f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c @@ -0,0 +1,430 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vctz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vctz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vctz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return 
__riscv_vctz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m4_tum(vm, 
vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return 
__riscv_vctz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m1_mu(vm, vd, vs2, vl); +} + 
+vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c new file mode 100644 index 000000000..b2f1ea776 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c new file mode 100644 index 000000000..962268b6c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c @@ -0,0 +1,31 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vgmul_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c new file mode 100644 index 000000000..6c9e84219 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c @@ -0,0 +1,430 @@ +// 
REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return 
__riscv_vrev8_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t 
vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t 
test_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vrev8_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vrev8_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vrev8_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c new file mode 100644 index 000000000..7c3c2336e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c @@ -0,0 +1,922 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkb \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t 
test_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf8_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf4_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf2_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m1_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m1_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m2_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m2_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m4_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m4_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m8_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tu(vd, vs2, vs1, vl); +} + 
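+// Illustrative usage sketch (hypothetical; not part of the generated test
+// file). The policy suffix selects the destination policy: _tu keeps the
+// tail elements of vd, while the _tum, _tumu, and _mu variants additionally
+// take a mask vm as their first argument, as the signatures above show. A
+// minimal caller might look like this; the names buf, n, and rot are
+// invented for illustration:
+//   size_t vl = __riscv_vsetvl_e16m4(n);            // choose a vector length
+//   vuint16m4_t v = __riscv_vle16_v_u16m4(buf, vl); // load vl elements
+//   v = __riscv_vrol_vx_u16m4_tu(v, v, rot, vl);    // rotate left, tail undisturbed
+//   __riscv_vse16_v_u16m4(buf, v, vl);              // store back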
+vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, 
size_t vl) { + return __riscv_vrol_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tum(vm, vd, vs2, rs1, 
vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t 
test_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t 
test_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); 
+} + +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t 
vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_mu(vm, vd, 
vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c new file mode 100644 index 000000000..20a976d46 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c @@ -0,0 +1,922 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkb \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf8_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf4_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf2_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m1_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m1_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return 
__riscv_vror_vv_u8m2_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m2_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m4_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m4_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m8_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, 
+ size_t vl) { + return __riscv_vror_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t vm, 
vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { 
+ return __riscv_vror_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vror_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t 
vl) { + return __riscv_vror_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t 
vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return 
__riscv_vror_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c new file mode 100644 index 000000000..8c3e787d9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c @@ -0,0 +1,55 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c new file mode 100644 index 000000000..62a1b3541 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c @@ -0,0 +1,55 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t
test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c new file mode 100644 index 000000000..0995653a9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c @@ -0,0 +1,55 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c new file mode 100644 index 000000000..22a8847ec --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c @@ -0,0 +1,31 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN:
-target-feature +zvksh \ +// RUN: -target-feature +zvl512b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm3c_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c new file mode 100644 index 000000000..40c72778e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksh \ +// RUN: -target-feature +zvl512b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c new file mode 100644 index 000000000..2666c99b4 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c @@ -0,0 +1,31 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4k_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl)
{ + return __riscv_vsm4k_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c new file mode 100644 index 000000000..297482bb4 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c @@ -0,0 +1,101 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t
test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c new file mode 100644 index 000000000..81c7d7ff0 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c @@ -0,0 +1,646 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return
__riscv_vwsll_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} 
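+// Illustrative sketch (editorial aside, not generator output): the `_tum`
+// suffix exercised in this block selects the tail-undisturbed, mask-agnostic
+// policy, so lanes at and beyond `vl` keep the values already in `vd`, while
+// masked-off lanes are left in an unspecified ("agnostic") state. The helper
+// below, using the hypothetical names `mask` and `prev`, shows how a caller
+// might rely on that contract; its call matches the u16m1 signature tested
+// above.
+static inline vuint16m1_t sketch_vwsll_keep_tail(vbool16_t mask,
+                                                 vuint16m1_t prev,
+                                                 vuint8mf2_t bytes,
+                                                 size_t shamt, size_t vl) {
+  // Active lanes receive bytes << shamt, widened to 16 bits; tail lanes
+  // keep prev.
+  return __riscv_vwsll_vx_u16m1_tum(mask, prev, bytes, shamt, vl);
+}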
+ +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m8_tum(vm, vd, 
vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, 
vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t vm, 
vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t 
vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c new file mode 100644 index 000000000..b6d6c71d9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c @@ -0,0 +1,105 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { +
return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c new file mode 100644 index 000000000..0b62e82bc --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c @@ -0,0 +1,105 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c new file mode 100644
index 000000000..1e38c9d0c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c @@ -0,0 +1,105 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c new file mode 100644 index 000000000..0016f2c53 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c @@ -0,0 +1,105 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64
-disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c new file mode 100644 index 000000000..f238e28a9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c new file mode 100644 index 000000000..17143962f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c new file mode 100644 index 000000000..261636582 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c @@ -0,0 +1,80 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, +
size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c new file mode 100644 index 000000000..b033e4854 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c @@ -0,0 +1,946 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t
test_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return 
__riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, 
vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + 
size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, 
size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t 
test_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + 
+vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t 
test_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c new file mode 100644 index 000000000..3f387f699 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c @@ -0,0 +1,430 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { +
return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m1_t 
test_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + 
+vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t 
test_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c new file mode 100644 index 000000000..5b5370b16 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c @@ -0,0 +1,430 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl)
{ + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return 
__riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t 
test_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + 
+vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c new file mode 100644 index 000000000..d4d4fefef --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c @@ -0,0 +1,185 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t
vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c new file mode 100644 index 000000000..ae564fff3 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint64m1_t
test_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t 
test_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c new file mode 100644 index 000000000..c22c4a4d1 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c @@ -0,0 +1,430 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf2_t
test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + 
size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return 
__riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t 
vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c new file mode 100644 index 000000000..43f0b9a1b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c @@ -0,0 +1,430 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return
__riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + 
vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t 
test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c new file mode 100644 index 000000000..b0e5600ff --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c @@ -0,0 +1,430 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 
-triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return
__riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t 
test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, 
vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c new file mode 100644 index 000000000..cb4cacaee --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c
b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c new file mode 100644 index 000000000..650dba88f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c @@ -0,0 +1,31 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c new file mode 100644 index 000000000..2167a18c3 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c @@ -0,0 +1,430 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t
vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return 
__riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, 
size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, 
vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c new file mode 100644 index 000000000..bf4c23a9b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c @@ -0,0 +1,922 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkb \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1,
vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t 
test_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t 
vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, 
rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t 
test_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t 
test_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + 
return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t 
vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c new file mode 100644 index 000000000..cf90f3590 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c @@ -0,0 +1,922 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvkb \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return
__riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return 
__riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return 
__riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + 
+vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t vm, 
vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t 
test_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + 
return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t 
test_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c new file mode 100644 index 000000000..7aec85acb --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c @@ -0,0 +1,55 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} diff --git
a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c new file mode 100644 index 000000000..e9931822b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c @@ -0,0 +1,55 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c new file mode 100644 index 000000000..1538fd2c4 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c @@ -0,0 +1,55 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t
test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c new file mode 100644 index 000000000..e2eeac8fa --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c @@ -0,0 +1,31 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksh \ +// RUN: -target-feature +zvl512b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c new file mode 100644 index 000000000..d8c4eb158 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c @@ -0,0 +1,35 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksh \ +// RUN: -target-feature +zvl512b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c new file
mode 100644 index 000000000..d4e9267d8 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c @@ -0,0 +1,31 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c new file mode 100644 index 000000000..2403a6f60 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c @@ -0,0 +1,101 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvl256b \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd,
vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c new file mode 100644 index 000000000..04949b9ca --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c @@ -0,0 +1,646 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zve64x \ +// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + size_t
rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, 
vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, 
+ vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t 
test_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + 
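+// Annotation, not generator output: a minimal usage sketch of the overloaded
+// mask-undisturbed form exercised above; the helper name is illustrative
+// only. Passing a scalar shift amount selects the .vx overload, exactly as
+// in test_vwsll_vx_u16m4_mu.
+vuint16m4_t example_vwsll_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2,
+                             size_t shamt, size_t vl) {
+  // Active elements get the 16-bit widened value of vs2 shifted left by
+  // shamt; inactive elements (vm[i] == 0) keep their value from vd.
+  return __riscv_vwsll_mu(vm, vd, vs2, shamt, vl);
+}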
+vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} diff --git 
a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c new file mode 100644 index 000000000..1c1f98128 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c @@ -0,0 +1,97 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c new file mode 100644 index 000000000..2eb7f3517 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c @@ -0,0 +1,97 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl)
{ + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c new file mode 100644 index 000000000..bd17e9ddc --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c @@ -0,0 +1,97 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t
vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c new file mode 100644 index 000000000..fdbb66b41 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c @@ -0,0 +1,97 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { +
return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c new file mode 100644 index 000000000..8f11194f5 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c @@ -0,0 +1,27 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c new file mode 100644 index 000000000..2f71a4d13 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c @@ -0,0 +1,27 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c new file mode 100644 index 000000000..6687ccb2c
--- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c @@ -0,0 +1,72 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c new file mode 100644 index 000000000..73315e18a --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c @@ -0,0 +1,939 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl);
+} + +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { + return 
__riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t 
vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t 
test_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); 
+} + +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t 
vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + 
vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return 
__riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c new file mode 100644 index 000000000..b46e2114c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c @@ -0,0 +1,423 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t vd,
vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t 
vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t vm, 
vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, 
vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c new file mode 100644 index 000000000..03f632695 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c @@ -0,0 +1,423 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + 
+vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + 
+vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, 
size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t 
vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c new file mode 100644 index 000000000..488ab2300 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c @@ -0,0 +1,178 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + 
vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c new file mode 100644 index 000000000..06a287746 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c @@ -0,0 +1,182 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + 
vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t 
test_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c new file mode 100644 index 000000000..2d8b78be7 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c @@ -0,0 +1,423 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m4_t
test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t 
vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + 
size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c new file mode 100644 index 000000000..10f897107 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c @@ -0,0 +1,423 @@ +#include <riscv_vector.h> +#include <stdint.h>
+ +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, 
vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, 
vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, 
vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c new file mode 100644 index 000000000..3e0bce679 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c @@ -0,0 +1,423 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} +
+vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + 
+vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t 
test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return 
__riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c new file mode 100644 index 000000000..a346e788f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c @@ -0,0 +1,27 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c new file mode 100644 index 000000000..fc282ac60 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c @@ -0,0 +1,23 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c new file mode 100644 index 000000000..d56e3555e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c @@ -0,0 +1,423 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2,
size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return 
__riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + 
return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return 
__riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c new file mode 100644 index 000000000..0f9405dcd --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c @@ -0,0 +1,915 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1,
vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + 
+vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + 
+vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t 
vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t 
rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t 
vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t 
test_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + 
size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c new file mode 100644 index 000000000..6f97b5a85 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c @@ -0,0 +1,915 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) {
+ return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return 
__riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t 
test_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t vm, 
vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + 
return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, 
vuint32m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + 
+vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + 
size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c new file mode 100644 index 000000000..eb2435d9c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c @@ -0,0 +1,47 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c
b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c new file mode 100644 index 000000000..f657a7901 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c @@ -0,0 +1,47 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c new file mode 100644 index 000000000..349f16c5b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c @@ -0,0 +1,47 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c
b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c new file mode 100644 index 000000000..1778de96a --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c @@ -0,0 +1,23 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c new file mode 100644 index 000000000..f4536867d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c @@ -0,0 +1,27 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c new file mode 100644 index 000000000..ac789ac44 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c @@ -0,0 +1,23 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c new file mode 100644 index 000000000..46cf176d3 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c @@ -0,0 +1,93 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd,
vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c new file mode 100644 index 000000000..e3736d299 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c @@ -0,0 +1,639 @@ +#include <riscv_vector.h> +#include <stdint.h> + +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t vd,
vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t 
test_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t 
test_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, 
vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, 
size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, 
vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.adoc new file mode 100644 index 000000000..4909949a9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.adoc @@ -0,0 +1,2737 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[policy-variant-overloaded]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vandn_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl); +vuint8mf4_t __riscv_vandn_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vandn_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl); +vuint8mf2_t __riscv_vandn_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vandn_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl); +vuint8m1_t __riscv_vandn_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vandn_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl); +vuint8m2_t __riscv_vandn_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vandn_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl); +vuint8m4_t __riscv_vandn_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vandn_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl); +vuint8m8_t __riscv_vandn_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vandn_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl); +vuint16mf4_t __riscv_vandn_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, + size_t vl); +vuint16mf2_t __riscv_vandn_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, + size_t vl); +vuint16m1_t __riscv_vandn_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vandn_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, + size_t vl); +vuint16m2_t 
__riscv_vandn_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vandn_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, + size_t vl); +vuint16m4_t __riscv_vandn_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vandn_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, + size_t vl); +vuint16m8_t __riscv_vandn_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vandn_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, + size_t vl); +vuint32mf2_t __riscv_vandn_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, + size_t vl); +vuint32m1_t __riscv_vandn_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vandn_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, + size_t vl); +vuint32m2_t __riscv_vandn_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vandn_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, + size_t vl); +vuint32m4_t __riscv_vandn_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vandn_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, + size_t vl); +vuint32m8_t __riscv_vandn_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vandn_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, + size_t vl); +vuint64m1_t __riscv_vandn_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vandn_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vandn_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vandn_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vandn_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vandn_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vandn_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vandn_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tum(vbool1_t vm, vuint8m8_t vd, 
vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tumu(vbool64_t 
vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu(vbool4_t vm, 
vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + 
uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t 
__riscv_vbrev_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t 
__riscv_vrev8_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vbrev8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev8_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev8_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev8_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev8_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev8_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev8_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev8_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev8_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev8_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev8_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev8_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev8_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); 
+vuint32m2_t __riscv_vbrev8_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev8_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev8_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev8_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev8_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vrev8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vrev8_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vrev8_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vrev8_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vrev8_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vrev8_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vrev8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vrev8_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vrev8_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vrev8_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vrev8_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vrev8_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vrev8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vrev8_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vrev8_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vrev8_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vrev8_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vrev8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vrev8_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vrev8_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vrev8_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vrev8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev_tumu(vbool16_t vm, 
vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vbrev8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev8_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev8_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev8_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev8_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev8_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev8_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev8_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev8_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev8_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev8_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev8_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev8_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev8_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev8_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vrev8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vrev8_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vrev8_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vrev8_tumu(vbool8_t vm, vuint8m1_t vd, 
vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vrev8_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vrev8_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vrev8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vrev8_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vrev8_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vrev8_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vrev8_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vrev8_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vrev8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vrev8_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vrev8_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vrev8_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vrev8_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vrev8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vrev8_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vrev8_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vrev8_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vrev8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t 
__riscv_vbrev_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vbrev8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev8_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev8_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev8_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev8_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev8_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev8_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev8_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev8_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev8_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev8_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev8_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev8_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev8_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev8_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev8_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev8_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev8_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vrev8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vrev8_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vrev8_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vrev8_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vrev8_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vrev8_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vrev8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vrev8_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vrev8_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vrev8_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vrev8_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vrev8_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vrev8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vrev8_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vrev8_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t 
__riscv_vrev8_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vrev8_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vrev8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vrev8_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vrev8_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vrev8_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vrev8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t 
__riscv_vctz_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vclz_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vclz_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vclz_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vclz_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vclz_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vclz_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vclz_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vclz_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vclz_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vclz_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vclz_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vclz_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vclz_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vclz_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vclz_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vclz_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vclz_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vclz_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vclz_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vclz_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vclz_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vctz_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vctz_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vctz_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vctz_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vctz_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vctz_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vctz_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vctz_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vctz_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vctz_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vctz_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vctz_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t 
__riscv_vctz_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vctz_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vctz_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vctz_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vctz_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vctz_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vctz_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vctz_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vctz_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vctz_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vclz_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vclz_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vclz_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vclz_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vclz_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vclz_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vclz_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vclz_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vclz_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vclz_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vclz_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vclz_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vclz_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vclz_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vclz_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vclz_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vclz_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vclz_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vclz_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vclz_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vclz_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vctz_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vctz_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vctz_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vctz_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vctz_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vctz_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vctz_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t 
vl); +vuint16mf4_t __riscv_vctz_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vctz_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vctz_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vctz_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vctz_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vctz_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vctz_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vctz_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vctz_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vctz_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vctz_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vctz_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vctz_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vctz_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vctz_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vclz_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vclz_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vclz_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vclz_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vclz_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vclz_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vclz_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vclz_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vclz_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vclz_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vclz_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vclz_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vclz_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vclz_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vclz_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vclz_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vclz_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vclz_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vclz_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vclz_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vclz_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vctz_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vctz_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t 
vl); +vuint8mf2_t __riscv_vctz_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vctz_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vctz_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vctz_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vctz_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vctz_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vctz_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vctz_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vctz_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vctz_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vctz_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vctz_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vctz_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vctz_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vctz_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vctz_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vctz_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vctz_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vctz_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vctz_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); 
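+// Illustrative usage (editorial sketch, not part of the generated listing):
+// with the tail-undisturbed (tu) policy, result elements at indices >= vl
+// keep the values of the destination operand vd, so fresh per-element
+// population counts can be merged into an existing register. `counts` and
+// `bits` are assumed vuint32m1_t values defined elsewhere:
+//   counts = __riscv_vcpop_tu(counts, bits, vl);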
+// masked functions +vuint8mf8_t __riscv_vcpop_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vcpop_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vcpop_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vcpop_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vcpop_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vcpop_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vcpop_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vcpop_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vcpop_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vcpop_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vcpop_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vcpop_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vcpop_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vcpop_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vcpop_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vcpop_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vcpop_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vcpop_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vcpop_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vcpop_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vcpop_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vcpop_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vcpop_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vcpop_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vcpop_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vcpop_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vcpop_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vcpop_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vcpop_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vcpop_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vcpop_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vcpop_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vcpop_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vcpop_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vcpop_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vcpop_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vcpop_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t 
__riscv_vcpop_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vcpop_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vcpop_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vcpop_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vcpop_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vcpop_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vcpop_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vcpop_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vcpop_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vcpop_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vcpop_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vcpop_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vcpop_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vcpop_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vcpop_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vcpop_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vcpop_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vcpop_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vcpop_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vcpop_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vcpop_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vcpop_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vcpop_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vcpop_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vcpop_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vcpop_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vcpop_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vrol_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint8mf4_t __riscv_vrol_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vrol_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint8mf2_t __riscv_vrol_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vrol_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl); +vuint8m1_t __riscv_vrol_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vrol_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint8m2_t __riscv_vrol_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vrol_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t 
vl); +vuint8m4_t __riscv_vrol_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vrol_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint8m8_t __riscv_vrol_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vrol_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl); +vuint16mf4_t __riscv_vrol_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vrol_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vrol_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vrol_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl); +vuint16m2_t __riscv_vrol_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vrol_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl); +vuint16m4_t __riscv_vrol_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vrol_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl); +vuint16m8_t __riscv_vrol_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vrol_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl); +vuint32mf2_t __riscv_vrol_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vrol_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vrol_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vrol_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vrol_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl); +vuint32m4_t __riscv_vrol_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vrol_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl); +vuint32m8_t __riscv_vrol_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vrol_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl); +vuint64m1_t __riscv_vrol_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vrol_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vrol_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vrol_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl); +vuint64m4_t __riscv_vrol_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vrol_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl); +vuint64m8_t __riscv_vrol_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vrol_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl); +vuint8mf8_t __riscv_vror_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vror_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint8mf4_t __riscv_vror_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vror_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint8mf2_t __riscv_vror_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); 
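+// Illustrative usage (editorial sketch, not part of the generated listing):
+// each rotate is overloaded on the rotate amount, taking either a vector of
+// per-element amounts (vs1) or a single scalar amount (rs1). Assuming `dest`
+// and `src` are vuint8mf2_t values, a scalar rotate-right by 3 with the
+// tail-undisturbed policy is:
+//   vuint8mf2_t r = __riscv_vror_tu(dest, src, (size_t)3, vl);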
+vuint8mf2_t __riscv_vror_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl); +vuint8m1_t __riscv_vror_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vror_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint8m2_t __riscv_vror_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vror_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint8m4_t __riscv_vror_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vror_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint8m8_t __riscv_vror_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vror_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl); +vuint16mf4_t __riscv_vror_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vror_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vror_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vror_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl); +vuint16m2_t __riscv_vror_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vror_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl); +vuint16m4_t __riscv_vror_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vror_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl); +vuint16m8_t __riscv_vror_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vror_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl); +vuint32mf2_t __riscv_vror_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vror_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vror_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vror_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vror_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl); +vuint32m4_t __riscv_vror_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vror_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl); +vuint32m8_t __riscv_vror_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vror_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl); +vuint64m1_t __riscv_vror_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vror_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vror_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vror_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl); +vuint64m4_t __riscv_vror_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vror_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl); +vuint64m8_t __riscv_vror_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vror_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl); +// masked functions +vuint8mf8_t 
__riscv_vrol_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); 
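+// Illustrative usage (editorial sketch, not part of the generated listing):
+// _tum combines tail-undisturbed with masking: tail elements keep `dest`'s
+// values, while masked-off elements follow the mask-agnostic policy.
+// Assuming `vm` is a vbool8_t mask and `dest`, `src` are vuint32m4_t values:
+//   vuint32m4_t r = __riscv_vrol_tum(vm, dest, src, (size_t)8, vl);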
+vuint32m8_t __riscv_vrol_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t 
vl); +vuint16m8_t __riscv_vror_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu(vbool1_t vm, vuint8m8_t vd, 
vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t 
vs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tumu(vbool64_t vm, 
vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu(vbool64_t 
vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_mu(vbool32_t vm, vuint16mf2_t vd, 
vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint16mf4_t __riscv_vwsll_tu(vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_tu(vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vwsll_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint16m1_t __riscv_vwsll_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl); +vuint16m2_t
__riscv_vwsll_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint16m2_t __riscv_vwsll_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint16m4_t __riscv_vwsll_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint16m4_t __riscv_vwsll_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint16m8_t __riscv_vwsll_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint16m8_t __riscv_vwsll_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint32mf2_t __riscv_vwsll_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu(vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vwsll_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vwsll_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vwsll_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vwsll_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl); +vuint32m4_t __riscv_vwsll_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vwsll_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl); +vuint32m8_t __riscv_vwsll_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vwsll_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl); +vuint64m1_t __riscv_vwsll_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint64m1_t __riscv_vwsll_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vwsll_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vwsll_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl); +vuint64m4_t __riscv_vwsll_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vwsll_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl); +vuint64m8_t __riscv_vwsll_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vwsll_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t 
vs2, + vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu(vbool32_t vm, vuint32m1_t vd, 
vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + 
vuint16m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vwsll_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl);
+vuint32m8_t __riscv_vwsll_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vwsll_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl);
+vuint64m1_t __riscv_vwsll_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint64m1_t __riscv_vwsll_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl);
+vuint64m2_t __riscv_vwsll_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint64m2_t __riscv_vwsll_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl);
+vuint64m4_t __riscv_vwsll_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint64m4_t __riscv_vwsll_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl);
+vuint64m8_t __riscv_vwsll_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint64m8_t __riscv_vwsll_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl);
+----
+
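+The following usage sketch is editorial, not generated: it assumes a
+toolchain with the Zvbb extension enabled, and the wrapper name is
+illustrative. Each 8-bit element of vs2 is widened to 16 bits and shifted
+left by one; the _tu policy keeps tail elements of vd undisturbed.
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Widen u8 -> u16 and multiply by 2 (logical shift left by 1).
+static inline vuint16m2_t widen_double_tu(vuint16m2_t vd, vuint8m1_t vs2,
+                                          size_t vl) {
+  return __riscv_vwsll_tu(vd, vs2, 1, vl);
+}
+----
+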
+=== Zvbc - Vector Carryless Multiplication
+
+[[policy-variant-overloaded]]
+==== Vector Carryless Multiplication
+
+[,c]
+----
+vuint64m1_t __riscv_vclmul_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m1_t __riscv_vclmul_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl);
+vuint64m2_t __riscv_vclmul_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m2_t __riscv_vclmul_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl);
+vuint64m4_t __riscv_vclmul_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m4_t __riscv_vclmul_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl);
+vuint64m8_t __riscv_vclmul_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint64m8_t __riscv_vclmul_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl);
+vuint64m1_t __riscv_vclmulh_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m1_t __riscv_vclmulh_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl);
+vuint64m2_t __riscv_vclmulh_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m2_t __riscv_vclmulh_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl);
+vuint64m4_t __riscv_vclmulh_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m4_t __riscv_vclmulh_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl);
+vuint64m8_t __riscv_vclmulh_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint64m8_t __riscv_vclmulh_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl);
+// masked functions
+vuint64m1_t __riscv_vclmul_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m1_t __riscv_vclmul_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl);
+vuint64m2_t __riscv_vclmul_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m2_t __riscv_vclmul_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl);
+vuint64m4_t __riscv_vclmul_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m4_t __riscv_vclmul_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl);
+vuint64m8_t __riscv_vclmul_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint64m8_t __riscv_vclmul_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl);
+vuint64m1_t __riscv_vclmulh_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m1_t __riscv_vclmulh_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl);
+vuint64m2_t __riscv_vclmulh_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m2_t __riscv_vclmulh_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl);
+vuint64m4_t __riscv_vclmulh_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m4_t __riscv_vclmulh_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl);
+vuint64m8_t __riscv_vclmulh_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint64m8_t __riscv_vclmulh_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl);
+// masked functions
+vuint64m1_t __riscv_vclmul_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m1_t __riscv_vclmul_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl);
+vuint64m2_t __riscv_vclmul_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m2_t __riscv_vclmul_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl);
+vuint64m4_t __riscv_vclmul_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m4_t __riscv_vclmul_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl);
+vuint64m8_t __riscv_vclmul_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint64m8_t __riscv_vclmul_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl);
+vuint64m1_t __riscv_vclmulh_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m1_t __riscv_vclmulh_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl);
+vuint64m2_t __riscv_vclmulh_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m2_t __riscv_vclmulh_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl);
+vuint64m4_t __riscv_vclmulh_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m4_t __riscv_vclmulh_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl);
+vuint64m8_t __riscv_vclmulh_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint64m8_t __riscv_vclmulh_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl);
+// masked functions
+vuint64m1_t __riscv_vclmul_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m1_t __riscv_vclmul_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl);
+vuint64m2_t __riscv_vclmul_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m2_t __riscv_vclmul_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl);
+vuint64m4_t __riscv_vclmul_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m4_t __riscv_vclmul_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl);
+vuint64m8_t __riscv_vclmul_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint64m8_t __riscv_vclmul_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl);
+vuint64m1_t __riscv_vclmulh_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m1_t __riscv_vclmulh_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl);
+vuint64m2_t __riscv_vclmulh_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m2_t __riscv_vclmulh_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl);
+vuint64m4_t __riscv_vclmulh_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m4_t __riscv_vclmulh_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl);
+vuint64m8_t __riscv_vclmulh_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint64m8_t __riscv_vclmulh_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl);
+----
+
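+A minimal sketch for the carryless-multiply listing above (editorial; the
+wrapper name is an assumption): vclmul returns the low 64 bits of each
+128-bit carryless product and vclmulh the high 64 bits, so a pair of calls
+yields the full product, e.g. for GHASH- or CRC-style folding.
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Carryless-multiply each element of vs2 by the polynomial poly; tail
+// elements are taken from vd_lo/vd_hi under the _tu policy.
+static inline void clmul_pair_tu(vuint64m1_t vd_lo, vuint64m1_t vd_hi,
+                                 vuint64m1_t vs2, uint64_t poly, size_t vl,
+                                 vuint64m1_t *lo, vuint64m1_t *hi) {
+  *lo = __riscv_vclmul_tu(vd_lo, vs2, poly, vl);
+  *hi = __riscv_vclmulh_tu(vd_hi, vs2, poly, vl);
+}
+----
+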
+=== Zvkg - Vector GCM/GMAC
+
+[[policy-variant-overloaded]]
+==== Vector GCM/GMAC
+
+[,c]
+----
+vuint32mf2_t __riscv_vghsh_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vghsh_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vghsh_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vghsh_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vghsh_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
+vuint32mf2_t __riscv_vgmul_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vgmul_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vgmul_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vgmul_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vgmul_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+----
+
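+A minimal GHASH sketch (editorial; wrapper and operand names are
+assumptions): per the Zvkg definition, vghsh folds the next block into the
+running hash, computing (Y ^ X) * H in GF(2^128) over each element group
+of four 32-bit lanes.
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Y: running hash, H: hash subkey, X: next 128-bit block.
+static inline vuint32m1_t ghash_step_tu(vuint32m1_t Y, vuint32m1_t H,
+                                        vuint32m1_t X, size_t vl) {
+  return __riscv_vghsh_tu(Y, H, X, vl);
+}
+----
+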
+=== Zvkned - NIST Suite: Vector AES Block Cipher
+
+[[policy-variant-overloaded]]
+==== Vector AES Encryption
+
+[,c]
+----
+vuint32mf2_t __riscv_vaesef_vv_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesef_vs_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesef_vs_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesef_vv_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesef_vs_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vv_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesef_vs_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vv_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesef_vs_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vs_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesef_vv_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesem_vv_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesem_vs_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesem_vs_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesem_vv_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesem_vs_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vv_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesem_vs_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vv_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesem_vs_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vs_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesem_vv_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+----
+
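+A minimal sketch of the last two AES-128 encryption rounds (editorial;
+wrapper name assumed): vaesem performs a full middle round and vaesef
+omits MixColumns, as the final round requires; the .vs forms instead
+broadcast one round-key group to all blocks.
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Rounds 9 and 10 of AES-128 over each 128-bit element group.
+static inline vuint32m1_t aes128_final_rounds_tu(vuint32m1_t state,
+                                                 vuint32m1_t rk9,
+                                                 vuint32m1_t rk10,
+                                                 size_t vl) {
+  state = __riscv_vaesem_vv_tu(state, rk9, vl);  // middle round
+  return __riscv_vaesef_vv_tu(state, rk10, vl);  // final round, no MixColumns
+}
+----
+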
+[[policy-variant-overloaded]]
+==== Vector AES Decryption
+
+[,c]
+----
+vuint32mf2_t __riscv_vaesdf_vv_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesdf_vs_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdf_vs_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdf_vs_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdf_vv_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdf_vs_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdf_vs_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdf_vv_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdf_vs_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vv_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdf_vs_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vs_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdf_vv_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesdm_vv_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32mf2_t __riscv_vaesdm_vs_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdm_vs_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdm_vv_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdm_vs_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vv_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vv_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vv_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+----
+
+[[policy-variant-overloaded]]
+==== Vector AES-128 Forward KeySchedule generation
+
+[,c]
+----
+vuint32mf2_t __riscv_vaeskf1_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl);
+vuint32m1_t __riscv_vaeskf1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl);
+vuint32m2_t __riscv_vaeskf1_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl);
+vuint32m4_t __riscv_vaeskf1_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl);
+vuint32m8_t __riscv_vaeskf1_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl);
+vuint32mf2_t __riscv_vaeskf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl);
+vuint32m1_t __riscv_vaeskf2_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl);
+vuint32m2_t __riscv_vaeskf2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl);
+vuint32m4_t __riscv_vaeskf2_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl);
+vuint32m8_t __riscv_vaeskf2_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl);
+----
+
+[[policy-variant-overloaded]]
+==== Vector AES round zero
+
+[,c]
+----
+vuint32mf2_t __riscv_vaesz_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesz_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesz_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+----
+
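+A minimal key-expansion sketch (editorial; wrapper names assumed): vaeskf1
+derives the next AES-128 round key from the previous one, with uimm giving
+the round number, and vaesz performs the round-zero AddRoundKey XOR.
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// rk0 -> rk1 (round number 1), and state ^= rk0 (AES round zero).
+static inline vuint32m1_t aes128_rk1_tu(vuint32m1_t rk0, size_t vl) {
+  return __riscv_vaeskf1_tu(rk0, rk0, 1, vl);
+}
+
+static inline vuint32m1_t aes_round0_tu(vuint32m1_t state, vuint32m1_t rk0,
+                                        size_t vl) {
+  return __riscv_vaesz_tu(state, rk0, vl);
+}
+----
+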
+=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash
+
+[[policy-variant-overloaded]]
+==== Vector SHA-2 message schedule
+
+[,c]
+----
+vuint32mf2_t __riscv_vsha2ms_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsha2ms_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsha2ms_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsha2ms_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsha2ms_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
+vuint64m1_t __riscv_vsha2ms_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m2_t __riscv_vsha2ms_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m4_t __riscv_vsha2ms_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m8_t __riscv_vsha2ms_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+----
+
+[[policy-variant-overloaded]]
+==== Vector SHA-2 two rounds of compression
+
+[,c]
+----
+vuint32mf2_t __riscv_vsha2ch_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsha2ch_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsha2ch_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsha2ch_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsha2ch_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
+vuint64m1_t __riscv_vsha2ch_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m2_t __riscv_vsha2ch_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m4_t __riscv_vsha2ch_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m8_t __riscv_vsha2ch_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint32mf2_t __riscv_vsha2cl_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsha2cl_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsha2cl_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsha2cl_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsha2cl_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
+vuint64m1_t __riscv_vsha2cl_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m2_t __riscv_vsha2cl_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m4_t __riscv_vsha2cl_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m8_t __riscv_vsha2cl_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+----
+
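+A minimal sketch of four SHA-256 compression rounds (editorial; the
+{a,b,e,f}/{c,d,g,h} packing and the operand order follow the Zvknh usage
+model, and kw is assumed to hold message-schedule words already added to
+the round constants):
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// vsha2cl and vsha2ch each perform two rounds of compression.
+static inline void sha256_quad_round_tu(vuint32m1_t *abef, vuint32m1_t *cdgh,
+                                        vuint32m1_t kw, size_t vl) {
+  *cdgh = __riscv_vsha2cl_tu(*cdgh, *abef, kw, vl);
+  *abef = __riscv_vsha2ch_tu(*abef, *cdgh, kw, vl);
+}
+----
+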
+=== Zvksed - ShangMi Suite: SM4 Block Cipher
+
+[[policy-variant-overloaded]]
+==== Vector SM4 KeyExpansion
+
+[,c]
+----
+vuint32mf2_t __riscv_vsm4k_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl);
+vuint32m1_t __riscv_vsm4k_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl);
+vuint32m2_t __riscv_vsm4k_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl);
+vuint32m4_t __riscv_vsm4k_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl);
+vuint32m8_t __riscv_vsm4k_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl);
+----
+
+[[policy-variant-overloaded]]
+==== Vector SM4 Rounds
+
+[,c]
+----
+vuint32mf2_t __riscv_vsm4r_vv_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32mf2_t __riscv_vsm4r_vs_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vsm4r_vs_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vsm4r_vv_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m1_t __riscv_vsm4r_vs_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vv_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vv_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vv_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+----
+
+=== Zvksh - ShangMi Suite: SM3 Secure Hash
+
+[[policy-variant-overloaded]]
+==== Vector SM3 Message Expansion
+
+[,c]
+----
+vuint32mf2_t __riscv_vsm3me_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsm3me_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsm3me_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsm3me_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsm3me_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
+----
+
+[[policy-variant-overloaded]]
+==== Vector SM3 Compression
+
+[,c]
+----
+vuint32mf2_t __riscv_vsm3c_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl);
+vuint32m1_t __riscv_vsm3c_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl);
+vuint32m2_t __riscv_vsm3c_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl);
+vuint32m4_t __riscv_vsm3c_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl);
+vuint32m8_t __riscv_vsm3c_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl);
+----
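+
+Minimal ShangMi sketches for the two sections above (editorial; wrapper
+names assumed): vsm4r consumes four round keys per element group, and the
+immediates select the round group (vsm4k) or round pair (vsm3c).
+
+[,c]
+----
+#include <riscv_vector.h>
+
+// Four SM4 encryption rounds; rk holds this group's round keys.
+static inline vuint32m1_t sm4_rounds_tu(vuint32m1_t state, vuint32m1_t rk,
+                                        size_t vl) {
+  return __riscv_vsm4r_vv_tu(state, rk, vl);
+}
+
+// Two SM3 compression rounds, selected by the immediate round index.
+static inline vuint32m1_t sm3_rounds_tu(vuint32m1_t state, vuint32m1_t msg,
+                                        size_t vl) {
+  return __riscv_vsm3c_tu(state, msg, 0, vl);
+}
+----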
diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc
new file mode 100644
index 000000000..d10aee22d
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc
@@ -0,0 +1,2282 @@
+
+=== Zvbb - Vector Bit-manipulation used in Cryptography
+
+[[policy-variant-overloaded]]
+==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not
+
+[,c]
+----
+vuint8mf8_t __riscv_vandn_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl);
+vuint8mf8_t __riscv_vandn_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl);
+vuint8mf4_t __riscv_vandn_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl);
+vuint8mf4_t __riscv_vandn_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl);
+vuint8mf2_t __riscv_vandn_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl);
+vuint8mf2_t __riscv_vandn_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl);
+vuint8m1_t __riscv_vandn_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl);
+vuint8m1_t __riscv_vandn_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl);
+vuint8m2_t __riscv_vandn_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl);
+vuint8m2_t __riscv_vandn_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl);
+vuint8m4_t __riscv_vandn_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl);
+vuint8m4_t __riscv_vandn_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl);
+vuint8m8_t __riscv_vandn_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl);
+vuint8m8_t __riscv_vandn_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl);
+vuint16mf4_t __riscv_vandn_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl);
+vuint16mf4_t __riscv_vandn_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl);
+vuint16mf2_t __riscv_vandn_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl);
+vuint16mf2_t __riscv_vandn_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl);
+vuint16m1_t __riscv_vandn_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl);
+vuint16m1_t __riscv_vandn_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl);
+vuint16m2_t __riscv_vandn_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl);
+vuint16m2_t __riscv_vandn_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl);
+vuint16m4_t __riscv_vandn_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl);
+vuint16m4_t __riscv_vandn_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl);
+vuint16m8_t __riscv_vandn_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl);
+vuint16m8_t __riscv_vandn_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl);
+vuint32mf2_t __riscv_vandn_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint32mf2_t __riscv_vandn_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl);
+vuint32m1_t __riscv_vandn_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint32m1_t __riscv_vandn_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl);
+vuint32m2_t __riscv_vandn_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint32m2_t __riscv_vandn_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl);
+vuint32m4_t __riscv_vandn_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint32m4_t __riscv_vandn_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl);
+vuint32m8_t __riscv_vandn_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
+vuint32m8_t __riscv_vandn_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl);
+vuint64m1_t __riscv_vandn_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m1_t __riscv_vandn_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl);
+vuint64m2_t __riscv_vandn_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m2_t __riscv_vandn_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl);
+vuint64m4_t __riscv_vandn_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m4_t __riscv_vandn_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl);
+vuint64m8_t __riscv_vandn_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint64m8_t __riscv_vandn_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl);
+// masked functions
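+// Usage note (editorial, not generated): in the masked variants below the
+// mask vm comes first; e.g. and-not with tail- and mask-undisturbed
+// semantics, clearing in vs2 every bit that is set in rs1:
+//   vuint8m1_t r = __riscv_vandn_tum(vm, vd, vs2, rs1, vl);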
+vuint8mf8_t __riscv_vandn_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t 
__riscv_vandn_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl); +vuint16m4_t 
__riscv_vandn_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t 
__riscv_vandn_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic 
Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tu(vuint8mf2_t vd, 
vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vbrev8_tum(vbool64_t vm, vuint8mf8_t vd, 
vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev8_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev8_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev8_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev8_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev8_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev8_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev8_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev8_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev8_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev8_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev8_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev8_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev8_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev8_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev8_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev8_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev8_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vrev8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vrev8_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vrev8_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vrev8_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vrev8_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vrev8_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vrev8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vrev8_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vrev8_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vrev8_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vrev8_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vrev8_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vrev8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vrev8_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vrev8_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vrev8_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vrev8_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t 
__riscv_vrev8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vrev8_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vrev8_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vrev8_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vrev8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vbrev8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev8_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev8_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev8_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev8_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev8_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev8_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev8_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t 
__riscv_vbrev8_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev8_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev8_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev8_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev8_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev8_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev8_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vrev8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vrev8_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vrev8_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vrev8_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vrev8_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vrev8_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vrev8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vrev8_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vrev8_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vrev8_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vrev8_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vrev8_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vrev8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vrev8_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vrev8_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vrev8_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vrev8_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vrev8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vrev8_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vrev8_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vrev8_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vrev8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t 
__riscv_vbrev_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vbrev8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vbrev8_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vbrev8_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vbrev8_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vbrev8_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vbrev8_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vbrev8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vbrev8_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vbrev8_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vbrev8_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vbrev8_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vbrev8_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vbrev8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vbrev8_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vbrev8_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vbrev8_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vbrev8_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vbrev8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vbrev8_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vbrev8_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vbrev8_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vbrev8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t 
__riscv_vrev8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2,
+                            size_t vl);
+vuint8mf4_t __riscv_vrev8_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2,
+                             size_t vl);
+vuint8mf2_t __riscv_vrev8_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2,
+                             size_t vl);
+vuint8m1_t __riscv_vrev8_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                            size_t vl);
+vuint8m2_t __riscv_vrev8_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                            size_t vl);
+vuint8m4_t __riscv_vrev8_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                            size_t vl);
+vuint8m8_t __riscv_vrev8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                            size_t vl);
+vuint16mf4_t __riscv_vrev8_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2,
+                              size_t vl);
+vuint16mf2_t __riscv_vrev8_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2,
+                              size_t vl);
+vuint16m1_t __riscv_vrev8_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2,
+                             size_t vl);
+vuint16m2_t __riscv_vrev8_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2,
+                             size_t vl);
+vuint16m4_t __riscv_vrev8_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2,
+                             size_t vl);
+vuint16m8_t __riscv_vrev8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2,
+                             size_t vl);
+vuint32mf2_t __riscv_vrev8_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2,
+                              size_t vl);
+vuint32m1_t __riscv_vrev8_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2,
+                             size_t vl);
+vuint32m2_t __riscv_vrev8_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2,
+                             size_t vl);
+vuint32m4_t __riscv_vrev8_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2,
+                             size_t vl);
+vuint32m8_t __riscv_vrev8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2,
+                             size_t vl);
+vuint64m1_t __riscv_vrev8_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2,
+                             size_t vl);
+vuint64m2_t __riscv_vrev8_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2,
+                             size_t vl);
+vuint64m4_t __riscv_vrev8_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2,
+                             size_t vl);
+vuint64m8_t __riscv_vrev8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2,
+                             size_t vl);
+----
+
+[[policy-variant-overloaded]]
+==== Vector Basic Bit-manipulation - Count Bits
+
+[,c]
+----
+vuint8mf8_t __riscv_vclz_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vclz_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vclz_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vclz_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl);
+vuint8m2_t __riscv_vclz_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl);
+vuint8m4_t __riscv_vclz_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl);
+vuint8m8_t __riscv_vclz_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl);
+vuint16mf4_t __riscv_vclz_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl);
+vuint16mf2_t __riscv_vclz_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl);
+vuint16m1_t __riscv_vclz_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl);
+vuint16m2_t __riscv_vclz_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl);
+vuint16m4_t __riscv_vclz_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl);
+vuint16m8_t __riscv_vclz_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl);
+vuint32mf2_t __riscv_vclz_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vclz_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vclz_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vclz_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vclz_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+vuint64m1_t __riscv_vclz_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl);
+vuint64m2_t __riscv_vclz_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl);
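+// NOTE (illustrative sketch, not part of the generated listing): vclz
+// counts the leading zero bits of each element (a zero element yields
+// SEW), and the _tu variants leave elements at indices >= vl unchanged
+// from vd. A minimal use, assuming <riscv_vector.h> and a Zvbb-enabled
+// toolchain; `prev` and `x` are hypothetical names:
+//
+//   vuint32m1_t lz = __riscv_vclz_tu(prev, x, vl);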
+vuint64m4_t __riscv_vclz_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vclz_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vclz_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vclz_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vclz_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vclz_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vclz_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vclz_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vclz_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vclz_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vclz_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vclz_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vclz_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vclz_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vclz_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vclz_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vclz_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vclz_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vclz_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vclz_tum(vbool32_t vm, 
vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vclz_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vclz_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vctz_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vctz_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vctz_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vctz_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vctz_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vctz_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vctz_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vctz_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vctz_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vctz_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vctz_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vctz_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vctz_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vctz_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vctz_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vctz_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vctz_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vctz_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vctz_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vctz_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vctz_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vctz_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vclz_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vclz_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vclz_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vclz_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vclz_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vclz_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vclz_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vclz_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vclz_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vclz_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vclz_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vclz_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vclz_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t 
__riscv_vclz_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vclz_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vclz_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vclz_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vclz_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vclz_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vclz_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vclz_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +vuint8mf8_t __riscv_vctz_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vctz_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vctz_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vctz_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vctz_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vctz_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vctz_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vctz_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vctz_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vctz_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vctz_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vctz_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vctz_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vctz_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vctz_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vctz_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vctz_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vctz_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vctz_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vctz_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vctz_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vctz_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vclz_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vclz_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vclz_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vclz_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vclz_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vclz_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vclz_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vclz_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); 
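+// NOTE (illustrative sketch, not part of the generated listing): in the
+// _mu variants, vd supplies the value of masked-off elements, so only
+// elements whose bit in vm is 1 are recomputed. Assuming hypothetical
+// names `m`, `dst`, and `x`:
+//
+//   dst = __riscv_vclz_mu(m, dst, x, vl);  // dst kept where m is 0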
+vuint16m1_t __riscv_vclz_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2,
+                            size_t vl);
+vuint16m2_t __riscv_vclz_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2,
+                            size_t vl);
+vuint16m4_t __riscv_vclz_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2,
+                            size_t vl);
+vuint16m8_t __riscv_vclz_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2,
+                            size_t vl);
+vuint32mf2_t __riscv_vclz_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2,
+                             size_t vl);
+vuint32m1_t __riscv_vclz_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2,
+                            size_t vl);
+vuint32m2_t __riscv_vclz_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2,
+                            size_t vl);
+vuint32m4_t __riscv_vclz_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2,
+                            size_t vl);
+vuint32m8_t __riscv_vclz_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2,
+                            size_t vl);
+vuint64m1_t __riscv_vclz_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2,
+                            size_t vl);
+vuint64m2_t __riscv_vclz_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2,
+                            size_t vl);
+vuint64m4_t __riscv_vclz_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2,
+                            size_t vl);
+vuint64m8_t __riscv_vclz_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2,
+                            size_t vl);
+vuint8mf8_t __riscv_vctz_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2,
+                            size_t vl);
+vuint8mf4_t __riscv_vctz_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2,
+                            size_t vl);
+vuint8mf2_t __riscv_vctz_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2,
+                            size_t vl);
+vuint8m1_t __riscv_vctz_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                           size_t vl);
+vuint8m2_t __riscv_vctz_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                           size_t vl);
+vuint8m4_t __riscv_vctz_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                           size_t vl);
+vuint8m8_t __riscv_vctz_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                           size_t vl);
+vuint16mf4_t __riscv_vctz_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2,
+                             size_t vl);
+vuint16mf2_t __riscv_vctz_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2,
+                             size_t vl);
+vuint16m1_t __riscv_vctz_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2,
+                            size_t vl);
+vuint16m2_t __riscv_vctz_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2,
+                            size_t vl);
+vuint16m4_t __riscv_vctz_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2,
+                            size_t vl);
+vuint16m8_t __riscv_vctz_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2,
+                            size_t vl);
+vuint32mf2_t __riscv_vctz_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2,
+                             size_t vl);
+vuint32m1_t __riscv_vctz_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2,
+                            size_t vl);
+vuint32m2_t __riscv_vctz_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2,
+                            size_t vl);
+vuint32m4_t __riscv_vctz_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2,
+                            size_t vl);
+vuint32m8_t __riscv_vctz_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2,
+                            size_t vl);
+vuint64m1_t __riscv_vctz_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2,
+                            size_t vl);
+vuint64m2_t __riscv_vctz_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2,
+                            size_t vl);
+vuint64m4_t __riscv_vctz_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2,
+                            size_t vl);
+vuint64m8_t __riscv_vctz_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2,
+                            size_t vl);
+----
+
+[[policy-variant-overloaded]]
+==== Vector Basic Bit-manipulation - Vector Population Count
+
+[,c]
+----
+vuint8mf8_t __riscv_vcpop_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl);
+vuint8mf4_t __riscv_vcpop_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl);
+vuint8mf2_t __riscv_vcpop_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl);
+vuint8m1_t __riscv_vcpop_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl);
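+// NOTE (illustrative sketch, not part of the generated listing): this
+// vcpop is the element-wise population count from Zvbb (one count per
+// element), distinct from the scalar mask-count intrinsic __riscv_vcpop.
+// Assuming hypothetical names `prev` and `bytes`:
+//
+//   vuint8m1_t bits = __riscv_vcpop_tu(prev, bytes, vl);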
+vuint8m2_t __riscv_vcpop_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vcpop_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vcpop_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vcpop_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vcpop_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vcpop_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vcpop_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vcpop_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vcpop_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vcpop_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vcpop_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vcpop_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vcpop_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vcpop_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vcpop_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vcpop_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vcpop_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vcpop_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vcpop_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vcpop_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vcpop_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vcpop_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vcpop_tumu(vbool32_t vm, vuint8mf4_t 
vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vcpop_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vcpop_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vcpop_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vcpop_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vcpop_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vcpop_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vcpop_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vcpop_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vcpop_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vcpop_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vcpop_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vcpop_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vcpop_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vcpop_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vcpop_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vcpop_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t __riscv_vcpop_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl); +vuint64m2_t __riscv_vcpop_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl); +vuint64m4_t __riscv_vcpop_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl); +vuint64m8_t __riscv_vcpop_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl); +vuint8mf4_t __riscv_vcpop_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl); +vuint8mf2_t __riscv_vcpop_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl); +vuint8m1_t __riscv_vcpop_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl); +vuint8m2_t __riscv_vcpop_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl); +vuint8m4_t __riscv_vcpop_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl); +vuint8m8_t __riscv_vcpop_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl); +vuint16mf4_t __riscv_vcpop_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl); +vuint16mf2_t __riscv_vcpop_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl); +vuint16m1_t __riscv_vcpop_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl); +vuint16m2_t __riscv_vcpop_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl); +vuint16m4_t __riscv_vcpop_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl); +vuint16m8_t __riscv_vcpop_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl); +vuint32mf2_t __riscv_vcpop_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl); +vuint32m1_t __riscv_vcpop_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl); +vuint32m2_t __riscv_vcpop_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl); +vuint32m4_t __riscv_vcpop_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl); +vuint32m8_t __riscv_vcpop_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl); +vuint64m1_t 
__riscv_vcpop_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2,
+                            size_t vl);
+vuint64m2_t __riscv_vcpop_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2,
+                             size_t vl);
+vuint64m4_t __riscv_vcpop_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2,
+                             size_t vl);
+vuint64m8_t __riscv_vcpop_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2,
+                             size_t vl);
+----
+
+[[policy-variant-overloaded]]
+==== Vector Bit-manipulation used in Cryptography - Rotate
+
+[,c]
+----
+vuint8mf8_t __riscv_vrol_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1,
+                            size_t vl);
+vuint8mf8_t __riscv_vrol_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1,
+                            size_t vl);
+vuint8mf4_t __riscv_vrol_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1,
+                            size_t vl);
+vuint8mf4_t __riscv_vrol_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1,
+                            size_t vl);
+vuint8mf2_t __riscv_vrol_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1,
+                            size_t vl);
+vuint8mf2_t __riscv_vrol_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1,
+                            size_t vl);
+vuint8m1_t __riscv_vrol_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
+                           size_t vl);
+vuint8m1_t __riscv_vrol_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1,
+                           size_t vl);
+vuint8m2_t __riscv_vrol_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
+                           size_t vl);
+vuint8m2_t __riscv_vrol_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1,
+                           size_t vl);
+vuint8m4_t __riscv_vrol_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
+                           size_t vl);
+vuint8m4_t __riscv_vrol_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1,
+                           size_t vl);
+vuint8m8_t __riscv_vrol_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1,
+                           size_t vl);
+vuint8m8_t __riscv_vrol_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1,
+                           size_t vl);
+vuint16mf4_t __riscv_vrol_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                             vuint16mf4_t vs1, size_t vl);
+vuint16mf4_t __riscv_vrol_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1,
+                             size_t vl);
+vuint16mf2_t __riscv_vrol_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                             vuint16mf2_t vs1, size_t vl);
+vuint16mf2_t __riscv_vrol_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1,
+                             size_t vl);
+vuint16m1_t __riscv_vrol_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1,
+                            size_t vl);
+vuint16m1_t __riscv_vrol_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1,
+                            size_t vl);
+vuint16m2_t __riscv_vrol_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1,
+                            size_t vl);
+vuint16m2_t __riscv_vrol_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1,
+                            size_t vl);
+vuint16m4_t __riscv_vrol_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1,
+                            size_t vl);
+vuint16m4_t __riscv_vrol_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1,
+                            size_t vl);
+vuint16m8_t __riscv_vrol_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1,
+                            size_t vl);
+vuint16m8_t __riscv_vrol_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1,
+                            size_t vl);
+vuint32mf2_t __riscv_vrol_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                             vuint32mf2_t vs1, size_t vl);
+vuint32mf2_t __riscv_vrol_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1,
+                             size_t vl);
+vuint32m1_t __riscv_vrol_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1,
+                            size_t vl);
+vuint32m1_t __riscv_vrol_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1,
+                            size_t vl);
+vuint32m2_t __riscv_vrol_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1,
+                            size_t vl);
+vuint32m2_t __riscv_vrol_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1,
+                            size_t vl);
+vuint32m4_t __riscv_vrol_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1,
+                            size_t vl);
+vuint32m4_t __riscv_vrol_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1,
+                            size_t vl);
+vuint32m8_t
__riscv_vrol_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vrol_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl); +vuint64m1_t __riscv_vrol_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vrol_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vrol_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vrol_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl); +vuint64m4_t __riscv_vrol_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vrol_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl); +vuint64m8_t __riscv_vrol_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vrol_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl); +vuint8mf8_t __riscv_vror_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint8mf8_t __riscv_vror_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint8mf4_t __riscv_vror_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint8mf4_t __riscv_vror_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint8mf2_t __riscv_vror_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint8mf2_t __riscv_vror_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl); +vuint8m1_t __riscv_vror_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint8m1_t __riscv_vror_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint8m2_t __riscv_vror_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint8m2_t __riscv_vror_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint8m4_t __riscv_vror_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint8m4_t __riscv_vror_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint8m8_t __riscv_vror_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl); +vuint8m8_t __riscv_vror_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl); +vuint16mf4_t __riscv_vror_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vror_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vror_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint16m1_t __riscv_vror_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl); +vuint16m2_t __riscv_vror_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint16m2_t __riscv_vror_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl); +vuint16m4_t __riscv_vror_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint16m4_t __riscv_vror_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl); +vuint16m8_t __riscv_vror_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl); +vuint16m8_t __riscv_vror_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl); +vuint32mf2_t __riscv_vror_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vror_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m1_t __riscv_vror_tu(vuint32m1_t 
vd, vuint32m1_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vror_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m2_t __riscv_vror_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl); +vuint32m4_t __riscv_vror_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m4_t __riscv_vror_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl); +vuint32m8_t __riscv_vror_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32m8_t __riscv_vror_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl); +vuint64m1_t __riscv_vror_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vror_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vror_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vror_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl); +vuint64m4_t __riscv_vror_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vror_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl); +vuint64m8_t __riscv_vror_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vror_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t 
__riscv_vrol_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); 
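+// NOTE (illustrative sketch, not part of the generated listing): vrol/vror
+// rotate each element of vs2 left/right by the corresponding element of
+// vs1 or by scalar rs1, taken modulo SEW; the _tum forms keep both tail
+// and masked-off elements from vd. Assuming hypothetical names `m` and
+// `acc`:
+//
+//   acc = __riscv_vror_tum(m, acc, acc, 7, vl);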
+vuint8m4_t __riscv_vror_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tumu(vbool64_t vm, vuint8mf8_t 
vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t 
__riscv_vrol_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + 
size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_mu(vbool1_t vm, vuint8m8_t 
vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_mu(vbool32_t vm, 
vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_mu(vbool64_t vm, vuint64m1_t vd, 
vuint64m1_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl); +vuint16mf4_t __riscv_vwsll_tu(vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl); +vuint16mf2_t __riscv_vwsll_tu(vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl); +vuint16m1_t __riscv_vwsll_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl); +vuint16m1_t __riscv_vwsll_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl); +vuint16m2_t __riscv_vwsll_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl); +vuint16m2_t __riscv_vwsll_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl); +vuint16m4_t __riscv_vwsll_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl); +vuint16m4_t __riscv_vwsll_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl); +vuint16m8_t __riscv_vwsll_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl); +vuint16m8_t __riscv_vwsll_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl); +vuint32mf2_t __riscv_vwsll_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu(vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, + size_t vl); +vuint32m1_t __riscv_vwsll_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl); +vuint32m1_t __riscv_vwsll_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl); +vuint32m2_t __riscv_vwsll_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vwsll_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl); +vuint32m4_t __riscv_vwsll_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vwsll_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl); +vuint32m8_t __riscv_vwsll_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vwsll_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl); +vuint64m1_t __riscv_vwsll_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl); +vuint64m1_t __riscv_vwsll_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl); +vuint64m2_t __riscv_vwsll_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vwsll_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl); +vuint64m4_t __riscv_vwsll_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vwsll_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl); +vuint64m8_t __riscv_vwsll_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vwsll_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl); +// masked 
functions +vuint16mf4_t __riscv_vwsll_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t 
vl); +vuint16mf2_t __riscv_vwsll_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, + size_t rs1, 
size_t vl); +vuint16m2_t __riscv_vwsll_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc new file mode 100644 index 000000000..7e561c15e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc @@ -0,0 +1,140 @@ + +=== Zvbc - Vector Carryless Multiplication + +[[policy-variant-overloaded]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmul_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmul_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmul_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmul_tu(vuint64m4_t vd, vuint64m4_t 
vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmul_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmul_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmul_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m1_t __riscv_vclmulh_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m2_t __riscv_vclmulh_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m4_t __riscv_vclmulh_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint64m8_t __riscv_vclmulh_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, + size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu(vbool8_t 
vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc new file mode 100644 index 000000000..5073e6a96 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc @@ -0,0 +1,24 @@ + +=== Zvkg - Vector GCM/GMAC + +[[policy-variant-overloaded]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vghsh_tu(vuint32m2_t vd, 
vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vghsh_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vghsh_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint32mf2_t __riscv_vgmul_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc new file mode 100644 index 000000000..6adfda0e4 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc @@ -0,0 +1,140 @@ + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[policy-variant-overloaded]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t 
__riscv_vaesem_vs_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_tu(vuint32m4_t vd, vuint32m4_t vs2, 
size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, + size_t vl); +vuint32m1_t __riscv_vaeskf1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, + size_t vl); +vuint32m2_t __riscv_vaeskf1_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, + size_t vl); +vuint32m4_t __riscv_vaeskf1_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, + size_t vl); +vuint32m8_t __riscv_vaeskf1_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, + size_t vl); +vuint32mf2_t __riscv_vaeskf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, + size_t vl); +vuint32m1_t __riscv_vaeskf2_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, + size_t vl); +vuint32m2_t __riscv_vaeskf2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, + size_t vl); +vuint32m4_t __riscv_vaeskf2_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, + size_t vl); +vuint32m8_t __riscv_vaeskf2_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, + size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc new file mode 100644 index 000000000..4185db4b7 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc @@ -0,0 +1,70 @@ + +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash + +[[policy-variant-overloaded]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vsha2ms_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vsha2ms_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vsha2ms_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint64m1_t __riscv_vsha2ms_tu(vuint64m1_t 
vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vsha2ms_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vsha2ms_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vsha2ms_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector SHA-2 two rounds of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vsha2ch_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vsha2ch_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vsha2ch_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint64m1_t __riscv_vsha2ch_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vsha2ch_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vsha2ch_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vsha2ch_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +vuint32mf2_t __riscv_vsha2cl_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vsha2cl_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vsha2cl_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vsha2cl_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +vuint64m1_t __riscv_vsha2cl_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl); +vuint64m2_t __riscv_vsha2cl_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl); +vuint64m4_t __riscv_vsha2cl_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl); +vuint64m8_t __riscv_vsha2cl_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc new file mode 100644 index 000000000..83031bc0c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc @@ -0,0 +1,45 @@ + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[policy-variant-overloaded]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, + size_t vl); +vuint32m1_t __riscv_vsm4k_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, + size_t vl); +vuint32m2_t __riscv_vsm4k_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, + size_t vl); +vuint32m4_t __riscv_vsm4k_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, + size_t vl); +vuint32m8_t __riscv_vsm4k_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, + size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t 
__riscv_vsm4r_vs_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc new file mode 100644 index 000000000..fb575f001 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc @@ -0,0 +1,36 @@ + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[policy-variant-overloaded]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl); +vuint32m2_t __riscv_vsm3me_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl); +vuint32m4_t __riscv_vsm3me_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl); +vuint32m8_t __riscv_vsm3me_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, + size_t vl); +vuint32m1_t __riscv_vsm3c_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, + size_t vl); +vuint32m2_t __riscv_vsm3c_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, + size_t vl); +vuint32m4_t __riscv_vsm3c_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, + size_t vl); +vuint32m8_t __riscv_vsm3c_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, + size_t vl); +---- diff --git a/doc/header.adoc b/doc/header.adoc index f55edf3a4..407d4179c 100644 --- a/doc/header.adoc +++ b/doc/header.adoc @@ -46,6 +46,10 @@ may not conform to the future standard. 
include::preface.adoc[]

include::rvv-intrinsic-spec.adoc[]

+include::vector-bfloat16-spec.adoc[]
+
+include::references.adoc[]
+
include::rvv-intrinsic-examples.adoc[]
diff --git a/doc/preface.adoc b/doc/preface.adoc
index bf0d5aa66..5c161e570 100644
--- a/doc/preface.adoc
+++ b/doc/preface.adoc
@@ -17,7 +17,6 @@ This RISC-V specification has been contributed to directly or indirectly by (in
Contributors to all versions of the spec in alphabetical order:
Brandon Wu,
-Camel Coder,
Craig Topper,
Eop Chen,
HanKuan Chen,
@@ -25,6 +24,7 @@ HsiangKai Wang,
Jerry Zhang Jian,
Kito Cheng,
Nick Knight,
+Olaf Bernstein,
Roger Ferrer Ibanez,
Yi-Hsiu Hsu,
Zakk Chen
diff --git a/doc/references.adoc b/doc/references.adoc
new file mode 100644
index 000000000..d5197e47f
--- /dev/null
+++ b/doc/references.adoc
@@ -0,0 +1,67 @@
+== References
+
+^0^https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc[Github - riscv/riscv-v-spec/v-spec.adoc]
+
+NOTE: Standard extensions are merged into `riscv/riscv-isa-manual` after ratification. There is an ongoing pull request ^26^ for the "V" extension to be merged. At this moment, this intrinsics specification still references the frozen draft ^0^. This reference will be updated in the future once the pull request has been merged.
+
+^1^https://github.com/riscv-non-isa/riscv-c-api-doc/blob/master/riscv-c-api.md[Github - riscv-non-isa/riscv-c-api-doc/riscv-c-api.md]
+
+^2^https://llvm.org/docs/RISCVUsage.html[User Guide for RISC-V Target]
+
+^3^https://gcc.gnu.org/onlinedocs/gcc/RISC-V-Options.html[RISC-V Options (Using the GNU Compiler Collection (GCC))]
+
+^4^Section 3.4.1 (Vector selected element width `vsew[2:0]`) in the specification ^0^
+
+^5^Section 3.4.2 (Vector Register Grouping (`vlmul[2:0]`)) in the specification ^0^
+
+^6^Section 3.4.3 (Vector Tail Agnostic and Vector Mask Agnostic `vta` and `vma`) in the specification ^0^
+
+^7^Section 5.3 (Vector Masking) in the specification ^0^
+
+^8^Section 3.8 (Vector Fixed-Point Rounding Mode Register `vxrm`) in the specification ^0^
+
+^9^https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#vector-register-convention[psABI: Vector Register Convention]
+
+^10^https://riscv.org/wp-content/uploads/2017/05/riscv-spec-v2.2.pdf[The RISC-V Instruction Set Manual: 8.2 Floating-Point Control and Status Register]
+
+^11^Section 3.5 (Vector Length Register) in the specification ^0^
+
+^12^Section 3.4.2 in the specification ^0^
+
+^13^Sections 11.13, 11.14, 13.6, and 13.7 in the specification ^0^
+
+^14^Section 4.5 (Mask Register Layout) in the specification ^0^
+
+^15^Section 7.5 in the specification ^0^
+
+^16^Section 7.8 in the specification ^0^
+
+^17^Section 5.2 (Vector Operands) in the specification ^0^
+
+^18^Section 6 (Configuration-Setting Instructions) in the specification ^0^
+
+^19^Section 18 (Standard Vector Extensions) in the specification ^0^
+
+^20^Section 18.2 (Zve*: Vector Extensions for Embedded Processors) in the specification ^0^
+
+^21^Section 12 (Vector Fixed-Point Arithmetic Instructions) in the specification ^0^
+
+^22^Section 3.9 (Vector Fixed-Point Saturation Flag `vxsat`) in the specification ^0^
+
+^23^Section 13 (Vector Floating-Point Instructions) in the specification ^0^
+
+^24^Section 16.3.1 (Vector Slideup Instructions) in the specification ^0^
+
+^25^Section 3.7 (Vector Start Index CSR `vstart`) in the specification ^0^
+
+^26^https://github.com/riscv/riscv-isa-manual/pull/1088[riscv/riscv-isa-manual#1088]
+
+^27^Section 6.3 (Constraints on Setting `vl`) in the specification ^0^
+
+^28^Section 6.4 (Example of stripmining and changes to SEW) in the specification ^0^
+
+^29^Section 3.6 (Vector Byte Length `vlenb`) in the specification ^0^
+
+^30^Section 16.6 (Whole Vector Register Move) in the specification ^0^
+
+^31^https://github.com/riscv/riscv-bfloat16/releases[RISC-V BFloat16 Specification]
\ No newline at end of file
diff --git a/doc/rvv-intrinsic-examples.adoc b/doc/rvv-intrinsic-examples.adoc
index 6de62ca69..0d16c4f0c 100644
--- a/doc/rvv-intrinsic-examples.adoc
+++ b/doc/rvv-intrinsic-examples.adoc
@@ -103,7 +103,7 @@ void matmul_rvv(double *a, double *b, double *c, int n, int m, int p) {
    // Set accumulator to zero.
    vfloat64m1_t vec_s = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
    vfloat64m1_t vec_zero = __riscv_vfmv_v_f_f64m1(0.0, vlmax);
-    for (size_t vl; k > 0; k -= vl) {
+    for (size_t vl; k > 0; k -= vl, ptr_a += vl, ptr_b += vl * m) {
      vl = __riscv_vsetvl_e64m1(k);

      // Load row a[i][k..k+vl)
diff --git a/doc/rvv-intrinsic-spec.adoc b/doc/rvv-intrinsic-spec.adoc
index 4685f1a53..5f9260805 100644
--- a/doc/rvv-intrinsic-spec.adoc
+++ b/doc/rvv-intrinsic-spec.adoc
@@ -8,6 +8,8 @@ This document uses the term "RVV" as an abbreviation for the RISC-V "V" extensio
The `__riscv_v_intrinsic` macro is the C macro to test the compiler's support for the RISC-V "V" extension intrinsics.
+This macro should be defined even if the vector extension is not enabled.
+
The value of the test macro is defined as its version, which is computed using the following formula. The formula is identical to what is defined in the RISC-V C API specification cite:[riscv-c-api].
----
@@ -22,7 +24,7 @@ To leverage the intrinsics in the toolchain, the header `<riscv_vector.h>` needs
[,c]
----
-#ifdef __riscv_v_intrinsic
+#if __riscv_v_intrinsic >= 1000000
#include <riscv_vector.h>
#endif /* __riscv_v_intrinsic */
----
@@ -109,9 +111,9 @@
[NOTE]
====
-This version of the specification does not cover the control of the vector fixed-point saturation flag (`vxsat`). Support for this feature is planned for a later version of the specification in a way that is compatible with existing fixed-point intrinsics. No mechanism to set or retrieve the value of `vxsat` is specified either.
+This specification does not provide support for manipulating the `vxsat` CSR. Since `vxsat` is not needed by a large majority of fixed-point code, we believe this specification is broadly useful as-is. Nevertheless, we expect that a future extension will define an additional set of fixed-point intrinsics that update `vxsat` in a specified manner, along with intrinsics to explicitly read and write `vxsat`. These new intrinsics would be interoperable with the intrinsics in this specification.
-The value of the `vxsat` after a fixed-point intrinsic is UNSPECIFIED. This includes the order in which the flag `vxsat` is updated in a program that executes a sequence of fixed-point intrinsics.
+The value of `vxsat` after a fixed-point intrinsic is UNSPECIFIED.
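+
+As an illustrative, non-normative sketch of this point (the function name and the inline-assembly CSR read below are for exposition only and are not part of the intrinsics API), portable code must not rely on the value returned here:
+
+[,c]
+----
+#include <riscv_vector.h>
+
+unsigned long vxsat_after_vsadd(vint32m1_t va, vint32m1_t vb, size_t vl) {
+  // Saturating add; this specification leaves the resulting vxsat value
+  // UNSPECIFIED, so the CSR value read below is not portable.
+  vint32m1_t vr = __riscv_vsadd(va, vb, vl);
+  (void)vr;
+  unsigned long sat;
+  __asm__ volatile("csrr %0, vxsat" : "=r"(sat)); // direct vxsat CSR read
+  return sat;
+}
+----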
==== [[control-of-frm]] diff --git a/doc/vector-bfloat16-spec.adoc b/doc/vector-bfloat16-spec.adoc new file mode 100644 index 000000000..77d041a45 --- /dev/null +++ b/doc/vector-bfloat16-spec.adoc @@ -0,0 +1,45 @@ +== Intrinsics for BFloat16 (Brain Float 16) instruction set extensions + +The RISC-V vector C intrinsics support the BFloat16 (Brain Float 16) instruction set extensions ^31^. + +[[bf16-naming-scheme]] +=== Naming scheme + +The BFloat16 intrinsics follow the naming scheme defined under <>, with `bf` as the abbreviation for BFloat16 types in the function suffix. + +[[bf16-vector-programming-model]] +=== Control of the vector extension programming model + +The BFloat16 intrinsics provide the same control of the vector programming model defined under <>. Intrinsics that represent BFloat16 instructions affected by `frm` (`vfncvtbf16.f.f.w` and `vfwmaccbf16`) follow what is defined under <> and provide variants of <> and <>. + +[[bf16-type-system]] +=== Type system + +Floating-point types have EEW and EMUL encoded into the type. The first row describes the EMUL and the first column describes the data type and element width of the scalar type. + +Floating-point types with element widths of 16 (Types=`__bf16`) require the `zfbfmin` and `zvfbfmin` extensions to be specified in the architecture. + +NOTE: Although C++23 introduces `<stdfloat>` for fixed-width floating-point types, this latest standard is not yet supported in the upstream RISC-V compiler. The specification (along with the prototype lists in the appendix) uses `__bf16` to represent the BFloat16 floating-point type. + +.BFloat16 types +[options="autowidth,header",float="center",align="center",cols="<1,<2,<2,<2,<2,<2,<2,<2"] +|=== +| Types | EMUL=1/8 | EMUL=1/4 | EMUL=1/2 | EMUL=1 | EMUL=2 | EMUL=4 | EMUL=8 +| __bf16 | N/A | vbfloat16mf4_t | vbfloat16mf2_t | vbfloat16m1_t | vbfloat16m2_t | vbfloat16m4_t | vbfloat16m8_t +|=== + +.Tuple types +[options="autowidth,header",float="center",align="center",cols="<1,<2,<2,<2,<2,<2,<2,<2"] +|=== +| Non-tuple Types (NFIELD=1)| NFIELD=2 | NFIELD=3 | NFIELD=4 | NFIELD=5 | NFIELD=6 | NFIELD=7 | NFIELD=8 +| vbfloat16mf4_t | vbfloat16mf4x2_t | vbfloat16mf4x3_t | vbfloat16mf4x4_t | vbfloat16mf4x5_t | vbfloat16mf4x6_t | vbfloat16mf4x7_t | vbfloat16mf4x8_t +| vbfloat16mf2_t | vbfloat16mf2x2_t | vbfloat16mf2x3_t | vbfloat16mf2x4_t | vbfloat16mf2x5_t | vbfloat16mf2x6_t | vbfloat16mf2x7_t | vbfloat16mf2x8_t +| vbfloat16m1_t | vbfloat16m1x2_t | vbfloat16m1x3_t | vbfloat16m1x4_t | vbfloat16m1x5_t | vbfloat16m1x6_t | vbfloat16m1x7_t | vbfloat16m1x8_t +| vbfloat16m2_t | vbfloat16m2x2_t | vbfloat16m2x3_t | vbfloat16m2x4_t | N/A | N/A | N/A | N/A +| vbfloat16m4_t | vbfloat16m4x2_t | N/A | N/A | N/A | N/A | N/A | N/A +|=== + +[[bf16-pseudo-intrinsics]] +=== Pseudo intrinsics + +The RISC-V vector BFloat16 types (provided under <>) also have pseudo intrinsic variants from <> to help with variable declaration and manipulation across intrinsic types.
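To make the naming scheme and type system concrete, here is a minimal sketch of BFloat16 usage (intrinsic names such as `__riscv_vfwmaccbf16_vv_f32m2` and `__riscv_vfncvtbf16_f_f_w_bf16m1` follow the naming scheme above; the example assumes `zvfbfmin` and `zvfbfwma` are enabled):

[,c]
----
#include <riscv_vector.h>

// Widening multiply-accumulate of two bf16 vectors into an f32
// accumulator, then a narrowing convert back to bf16. Note how EMUL
// doubles across the widening: bf16m1 operands pair with an f32m2
// accumulator.
vbfloat16m1_t bf16_fma(vbfloat16m1_t vs1, vbfloat16m1_t vs2,
                       vfloat32m2_t acc, size_t vl) {
  acc = __riscv_vfwmaccbf16_vv_f32m2(acc, vs1, vs2, vl);
  return __riscv_vfncvtbf16_f_f_w_bf16m1(acc, vl);
}
----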
diff --git a/rvv-intrinsic-generator/.pylintrc b/rvv-intrinsic-generator/.pylintrc index ae0b04986..721bd54fb 100644 --- a/rvv-intrinsic-generator/.pylintrc +++ b/rvv-intrinsic-generator/.pylintrc @@ -424,7 +424,7 @@ valid-metaclass-classmethod-first-arg=mcs # Exceptions that will emit a warning when being caught. Defaults to # "Exception" -overgeneral-exceptions=StandardError, - Exception, - BaseException +overgeneral-exceptions=builtins.StandardError, + builtins.Exception, + builtins.BaseException diff --git a/rvv-intrinsic-generator/Makefile b/rvv-intrinsic-generator/Makefile index 3d26481ae..f52ca80f0 100644 --- a/rvv-intrinsic-generator/Makefile +++ b/rvv-intrinsic-generator/Makefile @@ -31,10 +31,17 @@ __check_defined = \ $(error Undefined $1$(if $2, ($2)))) # Replace softfloat float-point types with LLVM compatible floating-point types +# macOS uses BSD sed +ifeq ($(shell uname), Darwin) + SED_CMD = sed -i '' +else + SED_CMD = sed -i +endif + replace_float = \ - sed -i 's/float16_t/_Float16/g' $(1)/*; \ - sed -i 's/float32_t/float/g' $(1)/*; \ - sed -i 's/float64_t/double/g' $(1)/* + $(SED_CMD) 's/float16_t/_Float16/g' $(1)/*; \ + $(SED_CMD) 's/float32_t/float/g' $(1)/*; \ + $(SED_CMD) 's/float64_t/double/g' $(1)/* ############################################################################### # Variables @@ -51,8 +58,12 @@ PYTHONPATHS = $(RVV_INTRINSIC_GEN_PATH):$(ABS_VENDOR_PATH) PY3 := PYTHONPATH=$$PYTHONPATH:$(PYTHONPATHS) python3 # Main entry script of the generator MAIN := rvv_intrinsic_gen.main +# BFloat16 instruction scripts +BF16_INST := $(RVV_INTRINSIC_GEN_PATH)/bfloat16_inst.py # Script to clang-format the auto-generated adoc files CLANG_FORMAT_ADOC = clang_format_autogen +# Extra flags specified when calling rvv_intrinsic_gen.main +EXTRA_FLAG := # Main output directory is default to auto-generated OUTPUT_DIR := ../auto-generated # Derives output directory for each set of intrinsics @@ -60,6 +71,14 @@ OUTPUT_DIR := ../auto-generated DIR := $(abspath $(OUTPUT_DIR)) # Output directory for policy intrinsics POLICY_DIR := $(DIR)/policy_funcs +# Output directory for bfloat16 non-policy intrinsics +BF16_DIR := $(DIR)/bfloat16 +# Output directory for bfloat16 policy intrinsics +BF16_POLICY_DIR := $(BF16_DIR)/policy_funcs +# Output directory for vector-crypto non-policy intrinsics +VECTOR_CRYPTO_DIR := $(DIR)/vector-crypto +# Output directory for vector-crypto policy intrinsics +VECTOR_CRYPTO_POLICY_DIR := $(VECTOR_CRYPTO_DIR)/policy_funcs # Directory that stores the v0.10 unit tests LEGACY_API_TESTS_DIR := $(abspath ../legacy-api-unit-tests) # Derived variable to trigger option --vendor-inst @@ -140,72 +159,81 @@ endef # If VENDOR_GENERATOR_SCRIPT is defined, also trigger it in all.
# NOTE: A possible enhancement to this is allow multiple targets be added here ifdef VENDOR_GENERATOR_SCRIPT -all: gen-document gen-test gen-compatible-header vendor-generator +all: gen-document gen-test gen-compatible-header bf16-all vector-crypto-all vendor-generator else -all: gen-document gen-test gen-compatible-header +all: gen-document gen-test gen-compatible-header bf16-all vector-crypto-all endif +bf16-all: gen-bf16-document gen-bf16-test +vector-crypto-all: gen-vector-crypto-document gen-vector-crypto-test + gen-document: non-overloaded-doc non-overloaded-docs overloaded-doc overloaded-docs +gen-bf16-document: bf16-non-overloaded-doc bf16-non-overloaded-docs bf16-overloaded-doc bf16-overloaded-docs +gen-vector-crypto-document: vector-crypto-non-overloaded-doc vector-crypto-non-overloaded-docs vector-crypto-overloaded-doc vector-crypto-overloaded-docs gen-test: non-overloaded-test overloaded-test gen-llvm-test gen-gnu-test +gen-bf16-test: bf16-non-overloaded-test bf16-overloaded-test gen-bf16-llvm-test +gen-vector-crypto-test: vector-crypto-non-overloaded-test vector-crypto-overloaded-test gen-vector-crypto-llvm-test gen-compatible-header: non-policy-compatible-header policy-compatible-header non-policy-overloaded-compatible-header policy-overloaded-compatible-header gen-llvm-test: llvm-non-overloaded-test llvm-overloaded-test +gen-bf16-llvm-test: bf16-llvm-non-overloaded-test bf16-llvm-overloaded-test +gen-vector-crypto-llvm-test: vector-crypto-llvm-non-overloaded-test vector-crypto-llvm-overloaded-test gen-gnu-test: gnu-overloaded-test gnu-non-overloaded-test # Generate all-in-one document for non-overloaded intrinsics non-overloaded-doc: - $(call gen_doc,$(DIR),intrinsic_funcs.adoc,$@,) - $(call gen_doc,$(POLICY_DIR),intrinsic_funcs.adoc,$@,--has-policy) + $(call gen_doc,$(DIR),intrinsic_funcs.adoc,$@,$(EXTRA_FLAG)) + $(call gen_doc,$(POLICY_DIR),intrinsic_funcs.adoc,$@,--has-policy $(EXTRA_FLAG)) $(call clang_format_adoc, --file, $(DIR)/intrinsic_funcs.adoc) $(call clang_format_adoc, --file, $(POLICY_DIR)/intrinsic_funcs.adoc) # Generate grouped documents for non-overloaded intrinsics non-overloaded-docs: - $(call gen_docs,$(DIR),intrinsic_funcs,$@,) - $(call gen_docs,$(POLICY_DIR),intrinsic_funcs,$@,--has-policy) + $(call gen_docs,$(DIR),intrinsic_funcs,$@,$(EXTRA_FLAG)) + $(call gen_docs,$(POLICY_DIR),intrinsic_funcs,$@,--has-policy $(EXTRA_FLAG)) $(call clang_format_adoc, --folder, $(DIR)/intrinsic_funcs) $(call clang_format_adoc, --folder, $(POLICY_DIR)/intrinsic_funcs) # Generate all-in-one document for overloaded intrinsics overloaded-doc: - $(call gen_doc,$(DIR),overloaded_intrinsic_funcs.adoc,$@,) - $(call gen_doc,$(POLICY_DIR),overloaded_intrinsic_funcs.adoc,$@,--has-policy) + $(call gen_doc,$(DIR),overloaded_intrinsic_funcs.adoc,$@,$(EXTRA_FLAG)) + $(call gen_doc,$(POLICY_DIR),overloaded_intrinsic_funcs.adoc,$@,--has-policy $(EXTRA_FLAG)) $(call clang_format_adoc, --file, $(DIR)/overloaded_intrinsic_funcs.adoc) $(call clang_format_adoc, --file, $(POLICY_DIR)/overloaded_intrinsic_funcs.adoc) # Generate grouped documents for overloaded intrinsics overloaded-docs: - $(call gen_docs,$(DIR),overloaded_intrinsic_funcs,$@,) - $(call gen_docs,$(POLICY_DIR),overloaded_intrinsic_funcs,$@,--has-policy) + $(call gen_docs,$(DIR),overloaded_intrinsic_funcs,$@,$(EXTRA_FLAG)) + $(call gen_docs,$(POLICY_DIR),overloaded_intrinsic_funcs,$@,--has-policy $(EXTRA_FLAG)) $(call clang_format_adoc, --folder, $(DIR)/overloaded_intrinsic_funcs) $(call clang_format_adoc, --folder, 
$(POLICY_DIR)/overloaded_intrinsic_funcs) # Generate non-overloaded intrinsic testing C source files non-overloaded-test: - $(call gen_tests,$(DIR)/api-testing,non-overloaded-test,) - $(call gen_tests,$(POLICY_DIR)/api-testing,non-overloaded-test,--has-policy) + $(call gen_tests,$(DIR)/api-testing,non-overloaded-test,$(EXTRA_FLAG)) + $(call gen_tests,$(POLICY_DIR)/api-testing,non-overloaded-test,--has-policy $(EXTRA_FLAG)) clang-format -i $(DIR)/api-testing/* clang-format -i $(POLICY_DIR)/api-testing/* # Generate overloaded intrinsic testing C source files overloaded-test: - $(call gen_tests,$(DIR)/overloaded-api-testing,overloaded-test,) - $(call gen_tests,$(POLICY_DIR)/overloaded-api-testing,overloaded-test,--has-policy) + $(call gen_tests,$(DIR)/overloaded-api-testing,overloaded-test,$(EXTRA_FLAG)) + $(call gen_tests,$(POLICY_DIR)/overloaded-api-testing,overloaded-test,--has-policy $(EXTRA_FLAG)) clang-format -i $(DIR)/overloaded-api-testing/* clang-format -i $(POLICY_DIR)/overloaded-api-testing/* # Generate non-overloaded intrinsic testing C source files llvm-non-overloaded-test: - $(call gen_tests,$(DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm) - $(call gen_tests,$(POLICY_DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm --has-policy) + $(call gen_tests,$(DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm $(EXTRA_FLAG)) + $(call gen_tests,$(POLICY_DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm --has-policy $(EXTRA_FLAG)) $(call replace_float, $(DIR)/llvm-api-tests) $(call replace_float, $(POLICY_DIR)/llvm-api-tests) clang-format -i $(DIR)/llvm-api-tests/* - clang-format -i $(POLICY_DIR)/overloaded-api-testing/* + clang-format -i $(POLICY_DIR)/llvm-api-tests/* # Generate overloaded intrinsic testing C source files llvm-overloaded-test: - $(call gen_tests,$(DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm) - $(call gen_tests,$(POLICY_DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm --has-policy) + $(call gen_tests,$(DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm $(EXTRA_FLAG)) + $(call gen_tests,$(POLICY_DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm --has-policy $(EXTRA_FLAG)) $(call replace_float, $(DIR)/llvm-overloaded-tests) $(call replace_float, $(POLICY_DIR)/llvm-overloaded-tests) clang-format -i $(DIR)/llvm-overloaded-tests/* @@ -221,21 +249,136 @@ gnu-overloaded-test: $(call gen_tests,$(DIR)/gnu-overloaded-tests,overloaded-test,--toolchain-type gnu) $(call gen_tests,$(POLICY_DIR)/gnu-overloaded-tests,overloaded-test,--toolchain-type gnu --has-policy) +# BFloat16 documents +bf16-non-overloaded-doc: + $(call gen_doc, $(BF16_DIR),intrinsic_funcs.adoc,non-overloaded-doc,--skip-default-inst --vendor-inst $(BF16_INST)) + $(call gen_doc, $(BF16_POLICY_DIR),intrinsic_funcs.adoc,non-overloaded-doc,--has-policy --skip-default-inst --vendor-inst $(BF16_INST)) + $(call clang_format_adoc, --file, $(BF16_DIR)/intrinsic_funcs.adoc) + $(call clang_format_adoc, --file, $(BF16_POLICY_DIR)/intrinsic_funcs.adoc) + +bf16-non-overloaded-docs: + $(call gen_doc, $(BF16_DIR),intrinsic_funcs,non-overloaded-docs,--skip-default-inst --vendor-inst $(BF16_INST)) + $(call gen_doc, $(BF16_POLICY_DIR),intrinsic_funcs,non-overloaded-docs,--has-policy --skip-default-inst --vendor-inst $(BF16_INST)) + $(call clang_format_adoc, --folder, $(BF16_DIR)/intrinsic_funcs) + $(call clang_format_adoc, --folder, $(BF16_POLICY_DIR)/intrinsic_funcs) + +bf16-overloaded-doc: + $(call gen_doc, 
$(BF16_DIR),overloaded_intrinsic_funcs.adoc,overloaded-doc,--skip-default-inst --vendor-inst $(BF16_INST)) + $(call gen_doc, $(BF16_POLICY_DIR),overloaded_intrinsic_funcs.adoc,overloaded-doc,--has-policy --skip-default-inst --vendor-inst $(BF16_INST)) + $(call clang_format_adoc, --file, $(BF16_DIR)/overloaded_intrinsic_funcs.adoc) + $(call clang_format_adoc, --file, $(BF16_POLICY_DIR)/overloaded_intrinsic_funcs.adoc) + +bf16-overloaded-docs: + $(call gen_doc, $(BF16_DIR),overloaded_intrinsic_funcs,overloaded-docs,--skip-default-inst --vendor-inst $(BF16_INST)) + $(call gen_doc, $(BF16_POLICY_DIR),overloaded_intrinsic_funcs,overloaded-docs,--has-policy --skip-default-inst --vendor-inst $(BF16_INST)) + $(call clang_format_adoc, --folder, $(BF16_DIR)/overloaded_intrinsic_funcs) + $(call clang_format_adoc, --folder, $(BF16_POLICY_DIR)/overloaded_intrinsic_funcs) + +# BFloat16 tests +# Generate non-overloaded intrinsic testing C source files +bf16-non-overloaded-test: + $(call gen_tests,$(BF16_DIR)/api-testing,non-overloaded-test,--skip-default-inst --vendor-inst $(BF16_INST)) + $(call gen_tests,$(BF16_POLICY_DIR)/api-testing,non-overloaded-test,--has-policy --skip-default-inst --vendor-inst $(BF16_INST)) + clang-format -i $(BF16_DIR)/api-testing/* + clang-format -i $(BF16_POLICY_DIR)/api-testing/* + +# Generate overloaded intrinsic testing C source files +bf16-overloaded-test: + $(call gen_tests,$(BF16_DIR)/overloaded-api-testing,overloaded-test,--skip-default-inst --vendor-inst $(BF16_INST)) + $(call gen_tests,$(BF16_POLICY_DIR)/overloaded-api-testing,overloaded-test,--has-policy --skip-default-inst --vendor-inst $(BF16_INST)) + clang-format -i $(BF16_DIR)/overloaded-api-testing/* + clang-format -i $(BF16_POLICY_DIR)/overloaded-api-testing/* + +# Generate non-overloaded intrinsic testing C source files +bf16-llvm-non-overloaded-test: + $(call gen_tests,$(BF16_DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm --skip-default-inst --vendor-inst $(BF16_INST)) + $(call gen_tests,$(BF16_POLICY_DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm --has-policy --skip-default-inst --vendor-inst $(BF16_INST)) + $(call replace_float, $(BF16_DIR)/llvm-api-tests) + $(call replace_float, $(BF16_POLICY_DIR)/llvm-api-tests) + clang-format -i $(BF16_DIR)/llvm-api-tests/* + clang-format -i $(BF16_POLICY_DIR)/llvm-api-tests/* + +# Generate overloaded intrinsic testing C source files +bf16-llvm-overloaded-test: + $(call gen_tests,$(BF16_DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm --skip-default-inst --vendor-inst $(BF16_INST)) + $(call gen_tests,$(BF16_POLICY_DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm --has-policy --skip-default-inst --vendor-inst $(BF16_INST)) + $(call replace_float, $(BF16_DIR)/llvm-overloaded-tests) + $(call replace_float, $(BF16_POLICY_DIR)/llvm-overloaded-tests) + clang-format -i $(BF16_DIR)/llvm-overloaded-tests/* + clang-format -i $(BF16_POLICY_DIR)/llvm-overloaded-tests/* + +# Vector crypto documents +vector-crypto-non-overloaded-doc: + $(call gen_doc,$(VECTOR_CRYPTO_DIR),intrinsic_funcs.adoc,non-overloaded-doc,--gen-vector-crypto $(EXTRA_FLAG)) + $(call gen_doc,$(VECTOR_CRYPTO_POLICY_DIR),intrinsic_funcs.adoc,non-overloaded-doc,--gen-vector-crypto --has-policy $(EXTRA_FLAG)) + $(call clang_format_adoc, --file, $(VECTOR_CRYPTO_DIR)/intrinsic_funcs.adoc) + $(call clang_format_adoc, --file, $(VECTOR_CRYPTO_POLICY_DIR)/intrinsic_funcs.adoc) + +vector-crypto-non-overloaded-docs: + $(call 
gen_doc,$(VECTOR_CRYPTO_DIR),intrinsic_funcs,non-overloaded-docs,--gen-vector-crypto $(EXTRA_FLAG)) + $(call gen_doc,$(VECTOR_CRYPTO_POLICY_DIR),intrinsic_funcs,non-overloaded-docs,--gen-vector-crypto --has-policy $(EXTRA_FLAG)) + $(call clang_format_adoc, --folder, $(VECTOR_CRYPTO_DIR)/intrinsic_funcs) + $(call clang_format_adoc, --folder, $(VECTOR_CRYPTO_POLICY_DIR)/intrinsic_funcs) + +vector-crypto-overloaded-doc: + $(call gen_doc,$(VECTOR_CRYPTO_DIR),overloaded_intrinsic_funcs.adoc,overloaded-doc,--gen-vector-crypto $(EXTRA_FLAG)) + $(call gen_doc,$(VECTOR_CRYPTO_POLICY_DIR),overloaded_intrinsic_funcs.adoc,overloaded-doc,--gen-vector-crypto --has-policy $(EXTRA_FLAG)) + $(call clang_format_adoc, --file, $(VECTOR_CRYPTO_DIR)/overloaded_intrinsic_funcs.adoc) + $(call clang_format_adoc, --file, $(VECTOR_CRYPTO_POLICY_DIR)/overloaded_intrinsic_funcs.adoc) + +vector-crypto-overloaded-docs: + $(call gen_doc,$(VECTOR_CRYPTO_DIR),overloaded_intrinsic_funcs,overloaded-docs,--gen-vector-crypto $(EXTRA_FLAG)) + $(call gen_doc,$(VECTOR_CRYPTO_POLICY_DIR),overloaded_intrinsic_funcs,overloaded-docs,--gen-vector-crypto --has-policy $(EXTRA_FLAG)) + $(call clang_format_adoc, --folder, $(VECTOR_CRYPTO_DIR)/overloaded_intrinsic_funcs) + $(call clang_format_adoc, --folder, $(VECTOR_CRYPTO_POLICY_DIR)/overloaded_intrinsic_funcs) + +# Vector-crypto tests +vector-crypto-non-overloaded-test: + $(call gen_tests,$(VECTOR_CRYPTO_DIR)/api-testing,non-overloaded-test,--gen-vector-crypto $(EXTRA_FLAG)) + $(call gen_tests,$(VECTOR_CRYPTO_POLICY_DIR)/api-testing,non-overloaded-test,--gen-vector-crypto --has-policy $(EXTRA_FLAG)) + clang-format -i $(VECTOR_CRYPTO_DIR)/api-testing/* + clang-format -i $(VECTOR_CRYPTO_POLICY_DIR)/api-testing/* + +vector-crypto-overloaded-test: + $(call gen_tests,$(VECTOR_CRYPTO_DIR)/overloaded-api-testing,overloaded-test,--gen-vector-crypto $(EXTRA_FLAG)) + $(call gen_tests,$(VECTOR_CRYPTO_POLICY_DIR)/overloaded-api-testing,overloaded-test,--gen-vector-crypto --has-policy $(EXTRA_FLAG)) + clang-format -i $(VECTOR_CRYPTO_DIR)/overloaded-api-testing/* + clang-format -i $(VECTOR_CRYPTO_POLICY_DIR)/overloaded-api-testing/* + +vector-crypto-llvm-non-overloaded-test: + $(call gen_tests,$(VECTOR_CRYPTO_DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm --gen-vector-crypto $(EXTRA_FLAG)) + $(call gen_tests,$(VECTOR_CRYPTO_POLICY_DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm --gen-vector-crypto --has-policy $(EXTRA_FLAG)) + $(call replace_float, $(VECTOR_CRYPTO_DIR)/llvm-api-tests) + $(call replace_float, $(VECTOR_CRYPTO_POLICY_DIR)/llvm-api-tests) + clang-format -i $(VECTOR_CRYPTO_DIR)/llvm-api-tests/* + clang-format -i $(VECTOR_CRYPTO_POLICY_DIR)/llvm-api-tests/* + +vector-crypto-llvm-overloaded-test: + $(call gen_tests,$(VECTOR_CRYPTO_DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm --gen-vector-crypto $(EXTRA_FLAG)) + $(call gen_tests,$(VECTOR_CRYPTO_POLICY_DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm --gen-vector-crypto --has-policy $(EXTRA_FLAG)) + $(call replace_float, $(VECTOR_CRYPTO_DIR)/llvm-overloaded-tests) + $(call replace_float, $(VECTOR_CRYPTO_POLICY_DIR)/llvm-overloaded-tests) + clang-format -i $(VECTOR_CRYPTO_DIR)/llvm-overloaded-tests/* + clang-format -i $(VECTOR_CRYPTO_POLICY_DIR)/llvm-overloaded-tests/* + +############################################################################### + # Generate the adaptor header for v0.10 non-policy-compatible-header: - $(call 
gen_doc,$(DIR)/rvv-v0p10-compatible-headers,non-policy.h,non-overloaded-compatible-header,) + $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,non-policy.h,non-overloaded-compatible-header,$(EXTRA_FLAG)) + clang-format -i $(DIR)/rvv-v0p10-compatible-headers/non-policy.h + policy-compatible-header: - $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,policy.h,non-overloaded-compatible-header,--has-policy) - clang-format -i $(DIR)/rvv-v0p10-compatible-headers/* + $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,policy.h,non-overloaded-compatible-header,--has-policy $(EXTRA_FLAG)) + clang-format -i $(DIR)/rvv-v0p10-compatible-headers/policy.h non-policy-overloaded-compatible-header: - $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,overloaded-non-policy.h,overloaded-compatible-header,) - clang-format -i $(DIR)/rvv-v0p10-compatible-headers/* + $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,overloaded-non-policy.h,overloaded-compatible-header,$(EXTRA_FLAG)) + clang-format -i $(DIR)/rvv-v0p10-compatible-headers/overloaded-non-policy.h policy-overloaded-compatible-header: - $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,overloaded-policy.h,overloaded-compatible-header,--has-policy) - clang-format -i $(DIR)/rvv-v0p10-compatible-headers/* - + $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,overloaded-policy.h,overloaded-compatible-header,--has-policy $(EXTRA_FLAG)) + clang-format -i $(DIR)/rvv-v0p10-compatible-headers/overloaded-policy.h ############################################################################### # Auto-generated Document / Test Targets @@ -251,18 +394,46 @@ git-commit-all: make git-commit-autogen-doc OUTPUT_DIR=${OUTPUT_DIR} make git-commit-autogen-test OUTPUT_DIR=${OUTPUT_DIR} +git-commit-bf16-all: + make git-commit-autogen-bf16-doc OUTPUT_DIR=${OUTPUT_DIR} + make git-commit-autogen-bf16-test OUTPUT_DIR=${OUTPUT_DIR} + +git-commit-vector-crypto-all: + make git-commit-autogen-vector-crypto-doc OUTPUT_DIR=${OUTPUT_DIR} + make git-commit-autogen-vector-crypto-test OUTPUT_DIR=${OUTPUT_DIR} + # Update and commit all documents under auto-generated git-commit-autogen-doc: make gen-document OUTPUT_DIR=${OUTPUT_DIR} git add ${DIR}/* git commit -m "[Auto-gen] Update documents under ${OUTPUT_DIR}. (make git-commit-autogen-doc)" +git-commit-autogen-bf16-doc: + make gen-bf16-document OUTPUT_DIR=${OUTPUT_DIR} + git add ${BF16_DIR}/* + git commit -m "[Auto-gen] Update bfloat16 documents under ${OUTPUT_DIR}. (make git-commit-autogen-bf16-doc)" + +git-commit-autogen-vector-crypto-doc: + make gen-vector-crypto-document OUTPUT_DIR=${OUTPUT_DIR} + git add ${VECTOR_CRYPTO_DIR}/* + git commit -m "[Auto-gen] Update vector crypto documents under ${OUTPUT_DIR}. (make git-commit-autogen-vector-crypto-doc)" + # Update and commit all testing C source files under auto-generated git-commit-autogen-test: make gen-test git add ${DIR}/* git commit -m "[Auto-gen] Update tests under ${OUTPUT_DIR}. (make git-commit-autogen-test)" +git-commit-autogen-bf16-test: + make gen-bf16-test + git add ${BF16_DIR}/* + git commit -m "[Auto-gen] Update bfloat16 tests under ${OUTPUT_DIR}. (make git-commit-autogen-bf16-test)" + +git-commit-autogen-vector-crypto-test: + make gen-vector-crypto-test + git add ${VECTOR_CRYPTO_DIR}/* + git commit -m "[Auto-gen] Update vector crypto tests under ${OUTPUT_DIR}. 
(make git-commit-autogen-vector-crypto-test)" + # Update and commit compatible headers under auto-generated git-commit-autogen-compatible-header: make gen-compatible-header @@ -276,6 +447,7 @@ diff-autogen: $(call check_defined, TEST_DIR, output directory for documents/tests generation) rm -rf ${abspath ${TEST_DIR}} make OUTPUT_DIR=${TEST_DIR} + diff -qr ${TEST_DIR} ${GOLDEN_DIR} ############################################################################### @@ -326,6 +498,38 @@ run-policy-overloaded-compatible-api-testing: $(LEGACY_API_TESTS_DIR)/policy-overloaded-api-testing $(call run_tests,$(LEGACY_API_TESTS_DIR)/policy-overloaded-api-testing,${COMPILER}) +run-bfloat16-api-testing: + $(call check_defined, COMPILER, compiler (clang/gcc)) + $(call run_tests,${DIR}/bfloat16/api-testing,${COMPILER}) + +run-bfloat16-overloaded-api-testing: + $(call check_defined, COMPILER, compiler (clang/gcc)) + $(call run_tests,${DIR}/bfloat16/overloaded-api-testing,${COMPILER}) + +run-bfloat16-policy-api-testing: + $(call check_defined, COMPILER, compiler (clang/gcc)) + $(call run_tests,${DIR}/bfloat16/policy_funcs/api-testing,${COMPILER}) + +run-bfloat16-policy-overloaded-api-testing: + $(call check_defined, COMPILER, compiler (clang/gcc)) + $(call run_tests,${DIR}/bfloat16/policy_funcs/overloaded-api-testing,${COMPILER}) + +run-vector-crypto-api-testing: + $(call check_defined, COMPILER, compiler (clang/gcc)) + $(call run_tests,${DIR}/vector-crypto/api-testing,${COMPILER}) + +run-vector-crypto-overloaded-api-testing: + $(call check_defined, COMPILER, compiler (clang/gcc)) + $(call run_tests,${DIR}/vector-crypto/overloaded-api-testing,${COMPILER}) + +run-vector-crypto-policy-api-testing: + $(call check_defined, COMPILER, compiler (clang/gcc)) + $(call run_tests,${DIR}/vector-crypto/policy_funcs/api-testing,${COMPILER}) + +run-vector-crypto-policy-overloaded-api-testing: + $(call check_defined, COMPILER, compiler (clang/gcc)) + $(call run_tests,${DIR}/vector-crypto/policy_funcs/overloaded-api-testing,${COMPILER}) + # A parameterized target to run testing through testing-report. # Makes target 'test' of ${API_MAKEFILE} with ${TESTING_REPORT_SCRIPT} under # ${API_DIR}. 
Requires ${API_DIR}, ${API_MAKEFILE}, ${TESTING_REPORT_SCRIPT} @@ -435,3 +639,13 @@ update-clang-test: cp $(OUTPUT_DIR)/llvm-overloaded-tests/*.c $(CLANG_TEST_DIR)/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/ cp $(OUTPUT_DIR)/policy_funcs/llvm-api-tests/*.c $(CLANG_TEST_DIR)/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/ cp $(OUTPUT_DIR)/policy_funcs/llvm-overloaded-tests/*.c $(CLANG_TEST_DIR)/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/ + + cp $(OUTPUT_DIR)/bfloat16/llvm-api-tests/*.c $(CLANG_TEST_DIR)/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/bfloat16/ + cp $(OUTPUT_DIR)/bfloat16/llvm-overloaded-tests/*.c $(CLANG_TEST_DIR)/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/bfloat16/ + cp $(OUTPUT_DIR)/bfloat16/policy_funcs/llvm-api-tests/*.c $(CLANG_TEST_DIR)/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/bfloat16/ + cp $(OUTPUT_DIR)/bfloat16/policy_funcs/llvm-overloaded-tests/*.c $(CLANG_TEST_DIR)/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/bfloat16/ + + cp $(OUTPUT_DIR)/vector-crypto/llvm-api-tests/*.c $(CLANG_TEST_DIR)/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/ + cp $(OUTPUT_DIR)/vector-crypto/llvm-overloaded-tests/*.c $(CLANG_TEST_DIR)/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/ + cp $(OUTPUT_DIR)/vector-crypto/policy_funcs/llvm-api-tests/*.c $(CLANG_TEST_DIR)/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/ + cp $(OUTPUT_DIR)/vector-crypto/policy_funcs/llvm-overloaded-tests/*.c $(CLANG_TEST_DIR)/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/ diff --git a/rvv-intrinsic-generator/Makefile.api b/rvv-intrinsic-generator/Makefile.api index 79ed230f9..94affa72c 100644 --- a/rvv-intrinsic-generator/Makefile.api +++ b/rvv-intrinsic-generator/Makefile.api @@ -14,8 +14,8 @@ # limitations under the License. ############################################################################### -CFLAGS?=-O -Werror=implicit-function-declaration -ARCH_FLAG?=-march=rv64gcv_zfh_zvfh +CFLAGS?=-O -Werror=implicit-function-declaration -menable-experimental-extensions +ARCH_FLAG?=-march=rv64gcv_zfh_zvbb_zvbc_zvfbfmin_zvfbfwma_zvfh_zvkng_zvksg_zvl512b EXTRA_CFLAGS?= TEST_MULTILIB:=rv32gcv-ilp32d,rv64gcv-lp64d diff --git a/rvv-intrinsic-generator/requirements.txt b/rvv-intrinsic-generator/requirements.txt index 3299a96b8..9e069ba22 100644 --- a/rvv-intrinsic-generator/requirements.txt +++ b/rvv-intrinsic-generator/requirements.txt @@ -1,4 +1,4 @@ junitparser==2.6.0 -pylint==2.14.1 +pylint==3.2.3 yapf pytype diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/bfloat16_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/bfloat16_inst.py new file mode 100644 index 000000000..77e47908c --- /dev/null +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/bfloat16_inst.py @@ -0,0 +1,170 @@ +""" +-------------------------------------------------------------------------------- +Copyright 2023 SiFive Inc + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +-------------------------------------------------------------------------------- + +Declares the BFloat16 intrinsics and links them to the templates that +realize them into function prototypes. The documents are generated following +this sequence and grouping. +""" + +from intrinsic_decorator import IntrinsicDecorators +from generator import CompatibleHeaderGenerator +from templates import load_template +from templates import seg_load_template +from templates import store_template +from templates import seg_store_template +from templates import reint_op_template +from templates import get_set_diff_lmul_op_template +from templates import misc_op_template +from templates import unary_op_template +from templates import cvt_op_template +from templates import mac_template +from constants import LMULS, WLMULS, NCVTLMULS, BFTYPES + +SEWS = [16] +NSEWS = [32] + + +def gen(g): + if isinstance(g, CompatibleHeaderGenerator): + assert False, "BFloat16 intrinsics are only supported after v1.0" + decorators = IntrinsicDecorators(g.has_tail_policy) + + #################################################################### + g.start_group("BFloat16 Vector Loads and Stores Intrinsics") + + g.function_group(load_template, "Vector Unit-Stride Load Intrinsics", + "bf16-vector-unit-stride-load", ["vle"], BFTYPES, SEWS, + LMULS, decorators.has_masking_maskedoff_policy) + + g.function_group(store_template, "Vector Unit-Stride Store Intrinsics", + "bf16-vector-unit-stride-store", ["vse"], BFTYPES, SEWS, + LMULS, decorators.has_masking_no_maskedoff) + + g.function_group(load_template, "Vector Strided Load Intrinsics", + "vector-strided-load", ["vlse"], BFTYPES, SEWS, LMULS, + decorators.has_masking_maskedoff_policy) + + g.function_group(store_template, "Vector Strided Store Intrinsics", + "vector-strided-store", ["vsse"], BFTYPES, SEWS, LMULS, + decorators.has_masking_no_maskedoff) + + g.function_group(load_template, "Vector Indexed Load Intrinsics", + "vector-indexed-load", ["vloxei", "vluxei"], BFTYPES, SEWS, + LMULS, decorators.has_masking_maskedoff_policy) + + g.function_group(store_template, "Vector Indexed Store Intrinsics", + "vector-indexed-store", ["vsoxei", "vsuxei"], BFTYPES, SEWS, + LMULS, decorators.has_masking_no_maskedoff) + + g.function_group(load_template, + "Unit-stride Fault-Only-First Loads Intrinsics", + "unit-stride-fault-only-first-loads", ["vleff"], BFTYPES, + SEWS, LMULS, decorators.has_masking_maskedoff_policy) + + #################################################################### + g.start_group("BFloat16 Vector Loads and Stores Segment Intrinsics") + + g.function_group(seg_load_template, + "Vector Unit-Stride Segment Load Intrinsics", + "vector-unit-stride-segment-load", ["vlseg", "vlsegff"], + BFTYPES, SEWS, LMULS, + decorators.has_masking_maskedoff_policy) + + g.function_group(seg_store_template, + "Vector Unit-Stride Segment Store Intrinsics", + "vector-unit-stride-segment-store", ["vsseg"], BFTYPES, + SEWS, LMULS, decorators.has_masking_no_maskedoff) + + g.function_group(seg_load_template, "Vector Strided Segment Load Intrinsics", + "vector-strided-segment-load", ["vlsseg"], BFTYPES, SEWS, + LMULS, decorators.has_masking_maskedoff_policy) + + g.function_group(seg_store_template, + "Vector Strided Segment Store Intrinsics", + "vector-strided-segment-store", ["vssseg"], BFTYPES, SEWS, + LMULS, decorators.has_masking_no_maskedoff) + + g.function_group(seg_load_template, "Vector Indexed Segment Load
Intrinsics", + "vector-indexed-segment-load", ["vloxseg", "vluxseg"], + BFTYPES, SEWS, LMULS, + decorators.has_masking_maskedoff_policy) + + g.function_group(seg_store_template, + "Vector Indexed Segment Store Intrinsics", + "vector-indexed-segment-store", ["vsoxseg", "vsuxseg"], + BFTYPES, SEWS, LMULS, decorators.has_masking_no_maskedoff) + + #################################################################### + g.start_group("BFloat16 Convert Intrinsics") + + g.function_group(cvt_op_template, "Vector Narrowing Convert Intrinsics", + "bf16-vector-narrow-convert", ["ncvtbf16"], "bfloat16", + NSEWS, NCVTLMULS, + decorators.has_masking_maskedoff_policy_frm) + + g.function_group(cvt_op_template, "Vector Widening Convert Intrinsics", + "bf16-vector-widening-convert", ["wcvtbf16"], "bfloat16", + SEWS, WLMULS, decorators.has_masking_maskedoff_policy) + + #################################################################### + g.start_group("BFloat16 Arithmetic Intrinsics") + + g.function_group(mac_template, + "Vector Widening Multiply-Accumulate Intrinsics", + "bf16-widening-multiply-accumulate", ["wmaccbf16"], BFTYPES, + SEWS, WLMULS, decorators.has_masking_no_maskedoff_policy_frm) + g.function_group(unary_op_template, "Vector BFloat16 Move Intrinsics", + "vector-bf16-move", ["mv"], BFTYPES, SEWS, LMULS, + decorators.has_no_masking_policy) + + g.function_group(unary_op_template, "Vector BFloat16 Merge Intrinsics", + "vector-bf16-merge", ["merge"], BFTYPES, SEWS, LMULS, + decorators.has_no_masking_policy) + + #################################################################### + g.start_group("BFloat16 Miscellaneous Vector Utility Intrinsics") + + g.function_group(reint_op_template, "Reinterpret Cast Conversion Intrinsics", + "reinterpret-cast-conversion", ["vreinterpret"], "bfloat16", + SEWS, LMULS, decorators.has_no_masking) + + g.function_group(misc_op_template, "Vector LMUL Extension Intrinsics", + "vector-lmul-extensionn", ["vlmul_ext_v"], BFTYPES, SEWS, + LMULS, decorators.has_no_masking) + + g.function_group(misc_op_template, "Vector LMUL Truncation Intrinsics", + "vector-lmul-truncation", ["vlmul_trunc_v"], BFTYPES, SEWS, + LMULS, decorators.has_no_masking) + + g.function_group(misc_op_template, "Vector Initialization Intrinsics", + "vector-initialization", ["vundefined"], BFTYPES, SEWS, + LMULS, decorators.has_no_masking) + + g.function_group(get_set_diff_lmul_op_template, "Vector Insertion Intrinsics", + "vector-insertion", ["vset"], BFTYPES, SEWS, LMULS, + decorators.has_no_masking) + + g.function_group(get_set_diff_lmul_op_template, + "Vector Extraction Intrinsics", "vector-extraction", + ["vget"], BFTYPES, SEWS, LMULS, decorators.has_no_masking) + + g.function_group(misc_op_template, "Vector Creation Intrinsics", + "vector-creation", ["vcreate"], BFTYPES, SEWS, LMULS, + decorators.has_no_masking) + + #################################################################### + g.gen_prologue() diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/constants.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/constants.py index e2ae21964..0895181eb 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/constants.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/constants.py @@ -28,6 +28,8 @@ NSEWS = [16, 32, 64] TYPES = ["float", "int", "uint"] ITYPES = ["int", "uint"] +UITYPE = ["uint"] FTYPES = ["float"] +BFTYPES = ["bfloat"] MTYPES = ["bool"] MLENS = [1, 2, 4, 8, 16, 32, 64] diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/enums.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/enums.py 
diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/enums.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/enums.py index d0ade6014..bedc390f4 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/enums.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/enums.py @@ -145,7 +145,8 @@ def __init__(self, inst_type=InstType.UNKNOWN, mem_type=MemType.NO_MEM, extra_attr=ExtraAttr.NO_ATTR, - NF=1): + NF=1, + required_ext=None): #pylint: disable=invalid-name self.SEW = SEW self.LMUL = LMUL @@ -154,6 +155,9 @@ def __init__(self, self.mem_type = mem_type self.extra_attr = extra_attr self.NF = NF + if required_ext is None: + required_ext = [] + self.required_ext = sorted(required_ext) def load_p(self): return self.mem_type == MemType.LOAD @@ -169,19 +173,56 @@ def get(args, decorator, inst_type, mem_type=MemType.NO_MEM, - extra_attr=ExtraAttr.NO_ATTR): + extra_attr=ExtraAttr.NO_ATTR, + required_ext=None): if decorator is None: # vsetvl and vsetvlmax - return InstInfo(args["SEW"], args["LMUL"], args["OP"], inst_type, - mem_type, extra_attr) + return InstInfo( + args["SEW"], + args["LMUL"], + args["OP"], + inst_type, + mem_type, + extra_attr, + required_ext=required_ext) elif "SEW" in args: if "NF" in args: - return InstInfo(args["SEW"], args["LMUL"], args["OP"], inst_type, - mem_type, extra_attr | decorator.flags, args["NF"]) + return InstInfo( + args["SEW"], + args["LMUL"], + args["OP"], + inst_type, + mem_type, + extra_attr | decorator.flags, + args["NF"], + required_ext=required_ext) else: - return InstInfo(args["SEW"], args["LMUL"], args["OP"], inst_type, - mem_type, extra_attr | decorator.flags) + return InstInfo( + args["SEW"], + args["LMUL"], + args["OP"], + inst_type, + mem_type, + extra_attr | decorator.flags, + required_ext=required_ext) else: # For mask operation - return InstInfo(0, 0, args["OP"], inst_type, mem_type, - extra_attr | decorator.flags) + return InstInfo( + 0, + 0, + args["OP"], + inst_type, + mem_type, + extra_attr | decorator.flags, + required_ext=required_ext) + + def get_required_exts(self) -> list: + return sorted(self.required_ext) + + def add_required_ext(self, ext: str) -> None: + if ext not in self.required_ext: + self.required_ext.append(ext) + + def remove_required_ext(self, ext: str) -> None: + if ext in self.required_ext: + self.required_ext.remove(ext) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py index 6acf8402f..689de8a97 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py @@ -17,6 +17,7 @@ Generator classes that controls structures of the output. """ +from abc import ABC, abstractmethod import os import collections import re @@ -25,7 +26,7 @@ from enums import ToolChainType -class Generator(): +class Generator(ABC): """ Base class for all generators. """ @@ -36,32 +37,35 @@ def __init__(self): pass def write(self, text): - pass + raise NotImplementedError def write_title(self, text, link): - pass + raise NotImplementedError def gen_prologue(self): pass def inst_group_prologue(self): - return "" + raise NotImplementedError def inst_group_epilogue(self): - return "" + raise NotImplementedError + @abstractmethod def func(self, inst_info, name, return_type, **kwargs): - # pylint: disable=unused-argument - # FIXME: inst_info is currently only used by
- self.generated_functions_set.add(name) - args = ", ".join(map(lambda a: f"{a[1]} {a[0]}", kwargs.items())) - # "T * name" to "T *name" - args = args.replace("* ", "*") - s = f"{return_type} {name} ({args});\n" - return s - - def function_group(self, template, title, link, op_list, type_list, sew_list, - lmul_list, decorator_list): + return NotImplemented + + def function_group(self, + template, + title, + link, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description=None, + required_ext_list=None): # pylint: disable=unused-argument # NOTE: 'title' and 'link' are only used in DocGenerator and # OverloadedDocGenerator. Probably need some decoupling here. @@ -71,10 +75,12 @@ def function_group(self, template, title, link, op_list, type_list, sew_list, type_list=type_list, sew_list=sew_list, lmul_list=lmul_list, - decorator_list=decorator_list) + decorator_list=decorator_list, + description=description, + required_ext_list=required_ext_list) def start_group(self, group_name): - pass + raise NotImplementedError @staticmethod def func_name(name): @@ -82,6 +88,7 @@ def func_name(name): name = name.replace("_int", "_i") name = name.replace("_float", "_f") name = name.replace("_bool", "_b") + name = name.replace("_bfloat", "_bf") # Follows the naming guideline under riscv-c-api-doc to add the `__riscv_` # suffix for all RVV intrinsics. name = "__riscv_" + name @@ -261,7 +268,8 @@ def get_overloaded_op_name(name): overloaded_name = "_".join([sn[0], sn[1], sn[-1]]) elif any(op in name for op in [ "vzext", "vsext", "vwadd", "vwsub", "vfwadd", "vfwsub", "vwadd", - "vwsub", "vfwadd", "vfwsub", "vmv", "vfmv" + "vwsub", "vfwadd", "vfwsub", "vmv", "vfmv", "vsm4r", "vaesef", "vaesem", + "vaesdf", "vaesdm" ]): # 2. compiler can not distinguish *.wx and *.vx, need encode them in # suffix, for example: @@ -295,6 +303,9 @@ def report_summary(self): \x1b[0mfunctions") def post_gen(self): + raise NotImplementedError + + def emit_function_group_description(self, description): pass @@ -318,7 +329,7 @@ def __init__(self, f, is_all_in_one, has_tail_policy): if not os.path.exists(self.folder): os.makedirs(self.folder) if not os.path.isdir(self.folder): - raise Exception("%s not dir, but it must be a dir.") + raise FileNotFoundError(f"{self.folder} not dir, but it must be a dir.") self.group_counter = 0 self.fd = None @@ -341,8 +352,17 @@ def inst_group_epilogue(self): self.write(s) return s - def function_group(self, template, title, link, op_list, type_list, sew_list, - lmul_list, decorator_list): + def function_group(self, + template, + title, + link, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description=None, + required_ext_list=None): self.write_title(title, link) if self.has_tail_policy and len(decorator_list) == 0: s = "Intrinsics here don't have a policy variant.\n" @@ -352,12 +372,27 @@ def function_group(self, template, title, link, op_list, type_list, sew_list, self.write("Intrinsics here don't have an overloaded variant.\n") return - super().function_group(template, title, link, op_list, type_list, sew_list, - lmul_list, decorator_list) + super().function_group( + template, + title, + link, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description=description, + required_ext_list=required_ext_list) def func(self, inst_info, name, return_type, **kwargs): name = Generator.func_name(name) - s = super().func(inst_info, name, return_type, **kwargs) + # pylint: disable=unused-argument + # FIXME: inst_info is currently only used by 
RIFGenerator. + self.generated_functions_set.add(name) + args = ", ".join(map(lambda a: f"{a[1]} {a[0]}", kwargs.items())) + # "T * name" to "T *name" + args = args.replace("* ", "*") + s = f"{return_type} {name} ({args});\n" self.write(s) def start_group(self, group_name): @@ -365,7 +400,6 @@ def start_group(self, group_name): # NOTE: If is_all_in_one is False, separate files of the grouped intrinsics # will be created, therefore we are allowing overriding the file descriptor # here. - super().start_group(group_name) if not self.is_all_in_one: file_name = f"{self.group_counter:02d}_{group_name}.adoc" file_name = file_name.replace(" ", "_") @@ -381,6 +415,10 @@ def start_group(self, group_name): os.path.join(self.folder, file_name), "w", encoding="utf-8") self.write(f"\n=== {group_name}\n") + def emit_function_group_description(self, description): + if description: + self.write(f"{description}\n") + class OverloadedDocGenerator(DocGenerator): """ @@ -394,14 +432,32 @@ def write_title(self, text, link): else: self.fd.write("\n[[overloaded-" + link + "]]\n==== " + text + "\n") - def function_group(self, template, title, link, op_list, type_list, sew_list, - lmul_list, decorator_list): + def function_group(self, + template, + title, + link, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description=None, + required_ext_list=None): self.do_not_have_overloaded_variant = True for op in op_list: if Generator.is_support_overloaded(op): self.do_not_have_overloaded_variant = False - super().function_group(template, title, link, op_list, type_list, sew_list, - lmul_list, decorator_list) + super().function_group( + template, + title, + link, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description=description, + required_ext_list=required_ext_list) def func(self, inst_info, name, return_type, **kwargs): func_name = Generator.func_name(name) @@ -435,36 +491,84 @@ def __init__(self, f, is_overloaded, toolchain_type, has_tail_policy): if not os.path.exists(self.folder): os.makedirs(self.folder) if not os.path.isdir(self.folder): - raise Exception("%s not dir, but it must be a dir.") + raise FileNotFoundError(f"{self.folder} not dir, but it must be a dir.") self.fd = None self.test_files = [] # test file name candidates which are declared in inst.py, it could have # different op name self.test_file_names = [] - def write_file_header(self, has_float_type): + def write(self, text): + pass + + def start_group(self, group_name): + pass + + def inst_group_prologue(self): + return "" + + def inst_group_epilogue(self): + return "" + + def write_file_header(self, has_float_type, has_bfloat16_type, requires_exts): #pylint: disable=line-too-long - int_llvm_header = (r"""// REQUIRES: riscv-registered-target + dynamic_llvm_header_prologue = r"""// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \ +""" + + dynamic_llvm_header_epilogue = r"""// RUN: -target-feature +experimental \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +""" + + int_llvm_header = r"""// REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s -""") - float_llvm_header = (r"""// REQUIRES: riscv-registered-target +""" + float_llvm_header = r"""// REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature 
+zfh \ -// RUN: -target-feature +experimental-zvfh -disable-O0-optnone \ +// RUN: -target-feature +zvfh -disable-O0-optnone \ // RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ // RUN: FileCheck --check-prefix=CHECK-RV64 %s -""") +""" + bfloat16_llvm_header = r"""// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +zvfbfmin \ +// RUN: -target-feature +zvfbfwma -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +""" gnu_header = ( r"""/* { dg-do compile } */ /* { dg-options """ + '"' + "-march=rv64gcv_zvfh -mabi=lp64d" + r""" -Wno-psabi -O3 -fno-schedule-insns -fno-schedule-insns2" } */ """) + + # The dynamic header is used when requires_exts is not empty. + if requires_exts: + dynamic_llvm_header = dynamic_llvm_header_prologue + for ext in requires_exts: + # "zvknha" only covers SEW<=32, but this generated test header + # is also shared by the following SEW==64 intrinsics, so + # replace it with "zvknhb", which supports both. + if ext == "zvknha": + ext = "zvknhb" + dynamic_llvm_header += f"// RUN: -target-feature +{ext} \\\n" + dynamic_llvm_header += dynamic_llvm_header_epilogue + if self.toolchain_type == ToolChainType.LLVM: - if has_float_type: + if requires_exts: + self.fd.write(dynamic_llvm_header) + elif has_bfloat16_type: + self.fd.write(bfloat16_llvm_header) + elif has_float_type: self.fd.write(float_llvm_header) else: self.fd.write(int_llvm_header) @@ -506,16 +610,20 @@ def func(self, inst_info, name, return_type, **kwargs): os.path.join(self.folder, test_file_name), mode, encoding="utf-8") stripped_prefix_non_overloaded_func_name = non_overloaded_func_name[8:] - func_decl = super().func(inst_info, - "test_" + stripped_prefix_non_overloaded_func_name, - return_type, **kwargs) - func_decl = func_decl.replace(" (", "(") + non_overloaded_func_name = "test_" + \ + stripped_prefix_non_overloaded_func_name + self.generated_functions_set.add(non_overloaded_func_name) + args = ", ".join(map(lambda a: f"{a[1]} {a[0]}", kwargs.items())) + # "T * name" to "T *name" + args = args.replace("* ", "*") + func_decl = f"{return_type} {non_overloaded_func_name}({args});\n" # Strip redundant parameters in function declaration because the intrinsic # requires an immediate to be provided to the parameter. # For "vxrm" parameter of the fixed-point intrinsics, value for it must be # an immediate. func_decl = func_decl.replace(", unsigned int vxrm", "") + func_decl = func_decl.replace(", size_t uimm", "") # For "frm" parameter of the floating-point intrinsics, value for it must # be an immediate. @@ -527,6 +635,7 @@ def func(self, inst_info, name, return_type, **kwargs): # righteously, there should be a function to determine if an intrinsic # has a floating-point variant and have the header emission depend on it. has_float_type = func_decl.find("vfloat") != -1 + has_bfloat16_type = func_decl.find("bf16") != -1 # NOTE(FIXME): This is logic as a hard fix to test case header emission.
has_float_type_variant_inst = [ "macc", "nmacc", "msac", "nmsac", "madd", "nmadd", "msub", "nmsub", @@ -539,7 +648,8 @@ def func(self, inst_info, name, return_type, **kwargs): has_float_type = True if header: - self.write_file_header(has_float_type) + self.write_file_header(has_float_type, has_bfloat16_type, + inst_info.get_required_exts()) def output_call_arg(arg_name, type_name): if ((name.startswith("vget") or name.startswith("vset")) \ @@ -553,6 +663,9 @@ def output_call_arg(arg_name, type_name): if arg_name == "frm": return "__RISCV_FRM_RNE" + if arg_name == "uimm": + return "0" + return arg_name # Write test func body. @@ -596,8 +709,17 @@ def post_gen(self): self.fd.write(dg_pattern_str) self.fd.close() - def function_group(self, template, title, link, op_list, type_list, sew_list, - lmul_list, decorator_list): + def function_group(self, + template, + title, + link, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description=None, + required_ext_list=None): self.test_file_names = op_list template.render( G=self, @@ -605,7 +727,9 @@ def function_group(self, template, title, link, op_list, type_list, sew_list, type_list=type_list, sew_list=sew_list, lmul_list=lmul_list, - decorator_list=decorator_list) + decorator_list=decorator_list, + description=description, + required_ext_list=required_ext_list) class Grouper(Generator): @@ -627,6 +751,15 @@ def start_group(self, group_name): if group_name not in self.groups: self.groups[group_name] = [] + def inst_group_prologue(self): + return "" + + def inst_group_epilogue(self): + return "" + + def write(self, text): + pass + def func(self, inst_info, name, return_type, **kwargs): func_name = Generator.func_name(name) @@ -642,8 +775,17 @@ def func(self, inst_info, name, return_type, **kwargs): def query_group_desc(self, func_name): return self.func_group[func_name] - def function_group(self, template, title, link, op_list, type_list, sew_list, - lmul_list, decorator_list): + def function_group(self, + template, + title, + link, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description=None, + required_ext_list=None): self.op_list = op_list self.groups[self.current_group].append(title) self.current_sub_group = title @@ -653,7 +795,9 @@ def function_group(self, template, title, link, op_list, type_list, sew_list, type_list=type_list, sew_list=sew_list, lmul_list=lmul_list, - decorator_list=decorator_list) + decorator_list=decorator_list, + description=description, + required_ext_list=required_ext_list) class CompatibleHeaderGenerator(Generator): @@ -782,12 +926,39 @@ def gen_prologue(self): def write(self, text): self.fd.write(text) - def function_group(self, template, title, link, op_list, type_list, sew_list, - lmul_list, decorator_list): + def start_group(self, group_name): + pass + + def inst_group_prologue(self): + return "" + + def inst_group_epilogue(self): + return "" + + def function_group(self, + template, + title, + link, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description=None, + required_ext_list=None): if self.has_tail_policy and len(decorator_list) == 0: return - super().function_group(template, title, link, op_list, type_list, sew_list, - lmul_list, decorator_list) + super().function_group( + template, + title, + link, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description=description, + required_ext_list=required_ext_list) @staticmethod def is_policy_func(inst_info):
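For illustration, with `requires_exts=["zvbb"]` the `write_file_header` logic above assembles a generated C test file that begins like this (the test function shown is a hypothetical example of the generator's output style):

[,c]
----
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -disable-O0-optnone \
// RUN: -target-feature +zvbb \
// RUN: -target-feature +experimental \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

// Bit-reverse each element; vbrev requires the Zvbb extension.
vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) {
  return __riscv_vbrev_v_u32m1(vs2, vl);
}
----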
diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/inst.py index fe2b1b07f..4acb6701f 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/inst.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/inst.py @@ -236,12 +236,18 @@ def gen(g): #################################################################### g.start_group("Vector Fixed-Point Arithmetic Intrinsics") + vxsat_description = "After executing an intrinsic in this section, " + \ + "the `vxsat` CSR assumes an UNSPECIFIED value." g.function_group( binary_op_template, "Vector Single-Width Saturating Add and Subtract Intrinsics", "vector-single-width-saturating-add-and-subtract", ["sadd", "ssub"], - ITYPES, SEWS, LMULS, decorators.has_masking_maskedoff_policy) + ITYPES, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy, + description=vxsat_description) g.function_group(binary_op_template, "Vector Single-Width Averaging Add and Subtract Intrinsics", @@ -254,8 +260,11 @@ def gen(g): "Vector Single-Width Fractional Multiply with Rounding and Saturation" + "Intrinsics", "vector-single-width-fractional-multiply-with-rounding-and-" + - "saturation", ["smul"], ["int"], SEWS, LMULS, - decorators.has_masking_maskedoff_policy_vxrm) + "saturation", ["smul"], ["int"], + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy_vxrm, + description=vxsat_description) g.function_group(binary_op_template, "Vector Single-Width Scaling Shift Intrinsics", @@ -263,10 +272,15 @@ def gen(g): ITYPES, SEWS, LMULS, decorators.has_masking_maskedoff_policy_vxrm) - g.function_group(binary_nop_template, - "Vector Narrowing Fixed-Point Clip Intrinsics", - "vector-narrowing-fixed-point-clip", ["nclip"], ITYPES, - WSEWS, WLMULS, decorators.has_masking_maskedoff_policy_vxrm) + g.function_group( + binary_nop_template, + "Vector Narrowing Fixed-Point Clip Intrinsics", + "vector-narrowing-fixed-point-clip", ["nclip"], + ITYPES, + WSEWS, + WLMULS, + decorators.has_masking_maskedoff_policy_vxrm, + description=vxsat_description) #################################################################### g.start_group("Vector Floating-Point Intrinsics") diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/main.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/main.py index f9b84daf1..fe0205d1b 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/main.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/main.py @@ -24,6 +24,7 @@ import importlib.util import inspect import inst +import vector_crypto_inst import generator from enums import ToolChainType @@ -104,6 +105,7 @@ class GenTypes: parser.add_argument("--skip-default-inst", default=False, action="store_true") parser.add_argument("--vendor-generator-script") parser.add_argument("--vendor-generator-name") + parser.add_argument("--gen-vector-crypto", default=False, action="store_true") parser.add_argument("--out") args = parser.parse_args() @@ -137,6 +139,12 @@ class GenTypes: GenTypes.NON_OVERLOADED_COMPATIBLE_HEADER, GenTypes.OVERLOADED_COMPATIBLE_HEADER ]: + # Vector crypto does not need compatible headers because they did + # not exist before v0.10 + if mode in (GenTypes.NON_OVERLOADED_COMPATIBLE_HEADER, + GenTypes.OVERLOADED_COMPATIBLE_HEADER) and\ + args.gen_vector_crypto: + return with open(args.out, "w", encoding="utf-8") as f: if mode == GenTypes.NON_OVERLOADED_DOC: g = generator.DocGenerator(f, True, args.has_policy) @@ -150,7 +158,10 @@ class GenTypes: assert False if not args.skip_default_inst: - inst.gen(g) + if args.gen_vector_crypto: + vector_crypto_inst.gen(g) + else: + inst.gen(g) else: print("Skipping default RVV
instructions (--skip-default-inst)") if vendor_gen is not None: @@ -173,7 +184,10 @@ class GenTypes: else: assert False if not args.skip_default_inst: - inst.gen(g) + if args.gen_vector_crypto: + vector_crypto_inst.gen(g) + else: + inst.gen(g) else: print("Skipping default RVV instructions (--skip-default-inst)") if vendor_gen is not None: diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_intcarry_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_intcarry_template.py index ce439a027..5bef39625 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_intcarry_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_intcarry_template.py @@ -26,9 +26,17 @@ from enums import InstType -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. + G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) @@ -38,10 +46,13 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): s = type_helper.s m = type_helper.m + assert args["OP"] is not None args["OP"] = "v" + args["OP"] - inst_info_vvm = InstInfo.get(args, decorator, InstType.VVVM) - inst_info_vxm = InstInfo.get(args, decorator, InstType.VVXM) + inst_info_vvm = InstInfo.get( + args, decorator, InstType.VVVM, required_ext=required_ext_list) + inst_info_vxm = InstInfo.get( + args, decorator, InstType.VVXM, required_ext=required_ext_list) if not "m" in args["OP"]: G.func( @@ -71,13 +82,18 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): s = type_helper.s m = type_helper.m + assert args["OP"] is not None args["OP"] = "v" + args["OP"] - inst_info_vvm = InstInfo.get(args, None, InstType.VVVM) - inst_info_vxm = InstInfo.get(args, None, InstType.VVXM) + inst_info_vvm = InstInfo.get( + args, None, InstType.VVVM, required_ext=required_ext_list) + inst_info_vxm = InstInfo.get( + args, None, InstType.VVXM, required_ext=required_ext_list) - inst_info_vv = InstInfo.get(args, None, InstType.VVV) - inst_info_vx = InstInfo.get(args, None, InstType.VVX) + inst_info_vv = InstInfo.get( + args, None, InstType.VVV, required_ext=required_ext_list) + inst_info_vx = InstInfo.get( + args, None, InstType.VVX, required_ext=required_ext_list) # madc or msbc if "m" in args["OP"]: diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_nop_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_nop_template.py index 82e1f27a8..c8162a00c 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_nop_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_nop_template.py @@ -32,9 +32,17 @@ def must_int_type(**kargs): # narrowing op template -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. 
+ G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) @@ -45,6 +53,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): SEW=sew_list, LMUL=lmul_list, OP2=["v", "s"]): + assert args["OP"] is not None data_type = args["TYPE"] op = args["OP"] op2 = args["OP2"] @@ -71,7 +80,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): continue type_helper = TypeHelper(**args) - inst_info = InstInfo.get(args, decorator, inst_type) + inst_info = InstInfo.get( + args, decorator, inst_type, required_ext=required_ext_list) if op in ["nsrl", "nsra", "nclip"]: if op2 == "v": diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_op_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_op_template.py index 2232609b6..a72322536 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_op_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_op_template.py @@ -28,9 +28,17 @@ from enums import ExtraAttr -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. + G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) @@ -40,6 +48,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): SEW=sew_list, LMUL=lmul_list, OP2=["v", "s"]): + assert args["OP"] is not None data_type = args["TYPE"] op = args["OP"] sew = args["SEW"] @@ -61,6 +70,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): type_helper = TypeHelper(**args) + s_op2 = None if (op in ["mulhsu", "ssra", "sra"] and data_type == "uint") or \ (op in ["ssrl", "srl"] and data_type == "int"): # Unsigned mulhsu and ssra are unsupported, signed ssrl is unsupported @@ -91,10 +101,14 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): args["OP"] = "v" + args["OP"] - inst_info_vv = InstInfo.get(args, decorator, InstType.VVV) - inst_info_vx = InstInfo.get(args, decorator, InstType.VVX) - inst_info_vf = InstInfo.get(args, decorator, InstType.VVF) - inst_info_v = InstInfo.get(args, decorator, InstType.VV) + inst_info_vv = InstInfo.get( + args, decorator, InstType.VVV, required_ext=required_ext_list) + inst_info_vx = InstInfo.get( + args, decorator, InstType.VVX, required_ext=required_ext_list) + inst_info_vf = InstInfo.get( + args, decorator, InstType.VVF, required_ext=required_ext_list) + inst_info_v = InstInfo.get( + args, decorator, InstType.VV, required_ext=required_ext_list) if args["OP2"] == "v": inst_info = inst_info_vv elif args["OP2"] == "x": @@ -102,7 +116,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): elif args["OP2"] == "f": inst_info = inst_info_vf else: - raise Exception("Unknown op2 type.") + raise ValueError("Unknown op2 type.") if op in ["ssra", "sra", "ssrl", "srl", "sll"]: if args["OP2"] == "v": @@ -143,7 +157,9 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): elif "rgather" == op: if op2 == "v": G.func( - InstInfo.get(args, decorator, InstType.VVV), + InstInfo.get( + args, decorator, InstType.VVV, + required_ext=required_ext_list), name="{OP}_v{OP2}_{TYPE}{SEW}m{LMUL}".format_map(args) + decorator.func_suffix, return_type=type_helper.v, 
@@ -154,7 +170,9 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): vl=type_helper.size_t) else: # vx G.func( - InstInfo.get(args, decorator, InstType.VVV), + InstInfo.get( + args, decorator, InstType.VVV, + required_ext=required_ext_list), name="{OP}_v{OP2}_{TYPE}{SEW}m{LMUL}".format_map(args) + decorator.func_suffix, return_type=type_helper.v, diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_wop_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_wop_template.py index b8a50d23f..9bd5036ee 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_wop_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/binary_wop_template.py @@ -26,13 +26,22 @@ from enums import InstType -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. + G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) for args in prod(OP=op_list, TYPE=type_list, SEW=sew_list, LMUL=lmul_list): + assert args["OP"] is not None data_type = args["TYPE"] op = args["OP"] @@ -46,12 +55,18 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): args["OP"] = "v" + args["OP"] - inst_info_wvv = InstInfo.get(args, decorator, InstType.WVV) - inst_info_wvx = InstInfo.get(args, decorator, InstType.WVX) - inst_info_wvf = InstInfo.get(args, decorator, InstType.WVF) - inst_info_wwv = InstInfo.get(args, decorator, InstType.WWV) - inst_info_wwx = InstInfo.get(args, decorator, InstType.WWX) - inst_info_wwf = InstInfo.get(args, decorator, InstType.WWF) + inst_info_wvv = InstInfo.get( + args, decorator, InstType.WVV, required_ext=required_ext_list) + inst_info_wvx = InstInfo.get( + args, decorator, InstType.WVX, required_ext=required_ext_list) + inst_info_wvf = InstInfo.get( + args, decorator, InstType.WVF, required_ext=required_ext_list) + inst_info_wwv = InstInfo.get( + args, decorator, InstType.WWV, required_ext=required_ext_list) + inst_info_wwx = InstInfo.get( + args, decorator, InstType.WWX, required_ext=required_ext_list) + inst_info_wwf = InstInfo.get( + args, decorator, InstType.WWF, required_ext=required_ext_list) args["LMUL"] = args["WLMUL"] args["SEW"] = args["WSEW"] diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/cmp_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/cmp_template.py index 8731d3744..0ad6a483b 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/cmp_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/cmp_template.py @@ -26,9 +26,17 @@ from enums import InstType -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. 
+ G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) @@ -38,6 +46,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): SEW=sew_list, LMUL=lmul_list, OP2=["v", "s"]): + assert args["OP"] is not None data_type = args["TYPE"] op = args["OP"] op2 = args["OP2"] @@ -64,7 +73,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): op = op + "u" args["OP"] = "v" + op - inst_info = InstInfo.get(args, decorator, inst_type) + inst_info = InstInfo.get( + args, decorator, inst_type, required_ext=required_ext_list) if op2 == "v": G.func( inst_info, diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/cvt_op_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/cvt_op_template.py index 48b0a62e7..50356d520 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/cvt_op_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/cvt_op_template.py @@ -28,11 +28,19 @@ from constants import ITYPES -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name, unused-argument # FIXME: Renaming 'G' to 'g' all in once later. # FIXME: Argument 'type_list' is unused but required for interface # consistency. We can prune it in the future. + G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) @@ -40,12 +48,20 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): # [dst_type, dst_type_short, src_type, src_type_short] if type_list == ITYPES: convert_set = [["int", "x", "int", "x"], ["uint", "x", "uint", "x"]] + elif type_list == "bfloat16": + if "ncvtbf16" in op_list: + convert_set = [["bfloat", "bf", "float", "f"]] + elif "wcvtbf16" in op_list: + convert_set = [["float", "f", "bfloat", "bf"]] + else: + assert False, "Unhandled instruction with type_list = 'bfloat16'" else: convert_set = [["int", "x", "float", "f"], ["uint", "xu", "float", "f"], ["float", "f", "int", "x"], ["float", "f", "uint", "xu"], ["float", "f", "float", "f"]] for args in prod( OP=op_list, SEW=sew_list, TYPES=convert_set, LMUL=lmul_list): + assert args["TYPES"] is not None op = args["OP"] type_helper = TypeHelper(**args) @@ -63,7 +79,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): # A double-width IEEE floating-point value can always represent a # single-width IEEE floating-point value exactly. 
# So we don't need frm variant for vfwcvt.f.f, and vfwcvt.f.x(u) here - if op == "wcvt" and decorator.flags & ExtraAttr.HAS_FRM and\ + if "wcvt" in op and decorator.flags & ExtraAttr.HAS_FRM and\ (args["TYPES0"] == args["TYPES2"] or\ ("float" in args["TYPES0"] and "int" in args["TYPES2"])): continue @@ -75,16 +91,16 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): args["MIDDLE"] = "v" factor = "" - if op == "wcvt": + if "wcvt" in op: factor = "W" - if op == "ncvt": + if "ncvt" in op: factor = "N" args["MIDDLE"] = "w" args["LLMUL"] = args[factor + "LMUL"] args["LSEW"] = args[factor + "SEW"] - if args["TYPES1"] == "f" or args["TYPES3"] == "f": + if "f" in args["TYPES1"] or "f" in args["TYPES3"]: args["OP"] = "f" + args["OP"] if args["TYPES0"] == "uint": @@ -102,7 +118,11 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): extra_attr = ExtraAttr.CONVERT inst_info = InstInfo.get( - args, decorator, InstType.VV, extra_attr=extra_attr) + args, + decorator, + InstType.VV, + extra_attr=extra_attr, + required_ext=required_ext_list) args["TYPE"] = args["TYPES2"] src_type_helper = TypeHelper(**args) @@ -115,9 +135,17 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): if not type_helper.valid_vtype(dst_type) or\ not type_helper.valid_vtype(src_type): continue - func_name = \ - "{OP}_{TYPES1}_{TYPES3}_{MIDDLE}_{D_TYPE}{LSEW}m{LLMUL}".format_map\ - (args) + if type_list == "bfloat16": + if "ncvt" in args["OP"]: + func_name = "{OP}_f_f_w_bf{LSEW}m{LLMUL}".format_map(args) + elif "wcvt" in args["OP"]: + func_name = "{OP}_f_f_v_f{LSEW}m{LLMUL}".format_map(args) + else: + assert False, "Unhandled instruction for bfloat16 type" + else: + func_name = \ + "{OP}_{TYPES1}_{TYPES3}_{MIDDLE}_{D_TYPE}{LSEW}m{LLMUL}".format_map\ + (args) G.func( inst_info, name=func_name + decorator.func_suffix, @@ -134,10 +162,18 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): if decorator.flags & ExtraAttr.HAS_FRM: continue + # BFloat16 converts do not have `_rod`/`_rtz` instructions + if type_list == "bfloat16": + continue + if args["TYPES1"] != args["TYPES3"] and args["TYPES3"] == "f": args["OP"] = args["OP"] + "_rtz" inst_info = InstInfo.get( - args, decorator, InstType.VV, extra_attr=extra_attr) + args, + decorator, + InstType.VV, + extra_attr=extra_attr, + required_ext=required_ext_list) func_name =\ "{OP}_{TYPES1}_{TYPES3}_{MIDDLE}_{D_TYPE}{LSEW}m{LLMUL}".format_map\ (args) @@ -153,7 +189,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): if op == "ncvt" and args["TYPES1"] == "f" and args["TYPES3"] == "f": args["OP"] = args["OP"] + "_rod" inst_info = \ - InstInfo.get(args, decorator, InstType.VV, extra_attr=extra_attr) + InstInfo.get(args, decorator, InstType.VV, extra_attr=extra_attr, + required_ext = required_ext_list) func_name = \ "{OP}_{TYPES1}_{TYPES3}_{MIDDLE}_{D_TYPE}{LSEW}m{LLMUL}".format_map\ (args) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/get_set_diff_lmul_op_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/get_set_diff_lmul_op_template.py index c43a27ad0..9eda7a796 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/get_set_diff_lmul_op_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/get_set_diff_lmul_op_template.py @@ -50,9 +50,17 @@ def vset_constraint(**kargs): and int(kargs["LMUL"]) > int(kargs["SRC_LMUL"]) -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + 
type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. + G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) @@ -65,7 +73,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): constraint = vset_constraint vget = False else: - raise Exception("Unknown operation") + raise ValueError("Unknown operation") for args in prod( OP=op_list, @@ -82,14 +90,16 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): args) if vget: G.func( - InstInfo.get(args, decorator, InstType.VGET), + InstInfo.get( + args, decorator, InstType.VGET, required_ext=required_ext_list), name=func_name, return_type=type_helper.v, src=src_type, index=type_helper.size_t) else: G.func( - InstInfo.get(args, decorator, InstType.VSET), + InstInfo.get( + args, decorator, InstType.VSET, required_ext=required_ext_list), name=func_name, return_type=type_helper.v, dest=type_helper.v, @@ -113,7 +123,11 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): func_name = "{OP}_v_{TYPE}{SEW}m{LMUL}x{NF}_{TYPE}{SEW}m{LMUL}".\ format_map(args) G.func( - InstInfo.get(args, decorator, InstType.VGET), + InstInfo.get( + args, + decorator, + InstType.VGET, + required_ext=required_ext_list), name=func_name, return_type=vector_type, src=tuple_type, @@ -122,7 +136,11 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): func_name = "{OP}_v_{TYPE}{SEW}m{LMUL}_{TYPE}{SEW}m{LMUL}x{NF}".\ format_map(args) G.func( - InstInfo.get(args, decorator, InstType.VSET), + InstInfo.get( + args, + decorator, + InstType.VSET, + required_ext=required_ext_list), name=func_name, return_type=tuple_type, dest=tuple_type, diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/load_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/load_template.py index 4d2529a6d..f8573cf26 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/load_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/load_template.py @@ -29,14 +29,23 @@ from enums import ExtraAttr -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. 
+ G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) for args in prod( OP=op_list, TYPE=type_list, SEW=sew_list, EEW=sew_list, LMUL=lmul_list): + assert args["OP"] is not None op = args["OP"] sew = args["SEW"] eew = args["EEW"] @@ -67,7 +76,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): if op not in ["vloxei", "vluxei"] and sew != eew: continue inst_info =\ - InstInfo.get(args, decorator, inst_type, MemType.LOAD, extra_attr) + InstInfo.get(args, decorator, inst_type, MemType.LOAD, extra_attr, + required_ext = required_ext_list) G.func( inst_info, name=\ diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/mac_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/mac_template.py index 0900eda42..68888a47e 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/mac_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/mac_template.py @@ -27,13 +27,22 @@ from enums import ExtraAttr -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. + G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) for args in prod(OP=op_list, TYPE=type_list, SEW=sew_list, LMUL=lmul_list): + assert args["TYPE"] is not None data_type = args["TYPE"] op = args["OP"] @@ -41,7 +50,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): if "int" in data_type and decorator.flags & ExtraAttr.HAS_FRM: continue - if data_type == "float": + if "float" in data_type: args["S_TYPE"] = "f" args["OP"] = "f" + op inst_type = InstType.VVF @@ -55,11 +64,23 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): args["OP"] = "v" + args["OP"] inst_info_vs = InstInfo.get( - args, decorator, inst_type, extra_attr=ExtraAttr.MAC) + args, + decorator, + inst_type, + extra_attr=ExtraAttr.MAC, + required_ext=required_ext_list) inst_info_vv = InstInfo.get( - args, decorator, InstType.VVV, extra_attr=ExtraAttr.MAC) + args, + decorator, + InstType.VVV, + extra_attr=ExtraAttr.MAC, + required_ext=required_ext_list) inst_info_vx = InstInfo.get( - args, decorator, InstType.VVX, extra_attr=ExtraAttr.MAC) + args, + decorator, + InstType.VVX, + extra_attr=ExtraAttr.MAC, + required_ext=required_ext_list) type_helper = TypeHelper(**args) if (("maccsu" in op) or ("maccus" in op)) and data_type == "uint": @@ -129,14 +150,22 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): rs1=type_helper.s, vs2=type_helper.v, vl=type_helper.size_t) - elif data_type == "float" and "w" in op: + elif "float" in data_type and "w" in op: + # Vector BF16 widening multiply-accumulate computes into FP32 values + if args["TYPE"] == "bfloat": + args["TYPE"] = "float" + dst_type_helper = TypeHelper(**args) + dst_type = dst_type_helper.wv + else: + dst_type = type_helper.wv + G.func( inst_info_vv, name="{OP}_vv_{TYPE}{WSEW}m{WLMUL}".format_map(args) + decorator.func_suffix, - return_type=type_helper.wv, + return_type=dst_type, **decorator.mask_args(type_helper.m, type_helper.v), - vd=type_helper.wv, + vd=dst_type, vs1=type_helper.v, vs2=type_helper.v, **decorator.extra_csr_args(type_helper.uint), @@ -145,9 +174,9 @@ def render(G, op_list, type_list, sew_list, 
lmul_list, decorator_list): inst_info_vs, name="{OP}_v{S_TYPE}_{TYPE}{WSEW}m{WLMUL}".format_map(args) + decorator.func_suffix, - return_type=type_helper.wv, + return_type=dst_type, **decorator.mask_args(type_helper.m, type_helper.v), - vd=type_helper.wv, + vd=dst_type, vs1=type_helper.s, vs2=type_helper.v, **decorator.extra_csr_args(type_helper.uint), diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/mask_load_store_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/mask_load_store_template.py index e222b4635..9383305ef 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/mask_load_store_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/mask_load_store_template.py @@ -26,11 +26,19 @@ from enums import InstType -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name, unused-argument # FIXME: Renaming 'G' to 'g' all in once later. # FIXME: Argument 'lmul_list' is unused but required for interface # consistency. We can prune it in the future. + G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) @@ -41,7 +49,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): load_p = op == "vlm" - inst_info = InstInfo.get(args, decorator, InstType.V) + inst_info = InstInfo.get( + args, decorator, InstType.V, required_ext=required_ext_list) if load_p: base_type = "const uint8_t *" diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/mask_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/mask_template.py index c450033ae..743e0fd4a 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/mask_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/mask_template.py @@ -25,14 +25,23 @@ from enums import InstType -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. 
+ G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) # treat sew_list as MLEN for args in prod(OP=op_list, TYPE=type_list, MLEN=sew_list): + assert args["OP"] is not None op = args["OP"] if op not in ["cpop", "first"]: args["OP"] = "m" + args["OP"] @@ -43,8 +52,10 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): args["OP"] = "v" + args["OP"] - inst_info_mm = InstInfo.get(args, decorator, InstType.MMM) - inst_info_m = InstInfo.get(args, decorator, InstType.MM) + inst_info_mm = InstInfo.get( + args, decorator, InstType.MMM, required_ext=required_ext_list) + inst_info_m = InstInfo.get( + args, decorator, InstType.MM, required_ext=required_ext_list) if op in ["mv", "not"]: # unary operator G.func( @@ -71,7 +82,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): G.func( inst_info_m, name="{OP}_m_b{MLEN}".format_map(args) + decorator.func_suffix, - return_type=type_helper.uint, + return_type=type_helper.ulong, **decorator.mask_args(type_helper.m), vs2=type_helper.m, vl=type_helper.size_t) @@ -79,7 +90,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): G.func( inst_info_m, name="{OP}_m_b{MLEN}".format_map(args) + decorator.func_suffix, - return_type=type_helper.int, + return_type=type_helper.long, **decorator.mask_args(type_helper.m), vs2=type_helper.m, vl=type_helper.size_t) @@ -94,6 +105,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): vl=type_helper.size_t) for args in prod(OP=op_list, TYPE=type_list, SEW=sew_list, LMUL=lmul_list): + assert args["OP"] is not None op = args["OP"] type_helper = TypeHelper(**args) @@ -101,7 +113,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): if op == "iota": G.func( - InstInfo.get(args, decorator, InstType.MM), + InstInfo.get(args, decorator, InstType.MM, + required_ext = required_ext_list), name=\ "viota_m_u{SEW}m{LMUL}".format_map(args) + decorator.func_suffix, return_type=type_helper.uiv, @@ -111,7 +124,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): vl=type_helper.size_t) if op == "id": G.func( - InstInfo.get(args, decorator, InstType.VM), + InstInfo.get( + args, decorator, InstType.VM, required_ext=required_ext_list), name="vid_v_u{SEW}m{LMUL}".format_map(args) + decorator.func_suffix, return_type=type_helper.uiv, **decorator.mask_args(type_helper.m, type_helper.uiv), diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/misc_op_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/misc_op_template.py index 9d38a0a9b..c8540467b 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/misc_op_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/misc_op_template.py @@ -30,9 +30,17 @@ from generator import CompatibleHeaderGenerator -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. 
+ G.emit_function_group_description(description) G.inst_group_prologue() # vundefine for non-tuple for decorator in decorator_list: @@ -40,8 +48,10 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): break decorator.write_text_header(G) + inst_type = None for args in prod(OP=op_list, TYPE=type_list, SEW=sew_list, LMUL=lmul_list): type_helper = TypeHelper(**args) + inst_type = InstType.UNKNOWN if args["OP"] not in ["vundefined"]: break if args["TYPE"] == "float" and args["SEW"] == 8: @@ -49,7 +59,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): if args["OP"] == "vundefined": inst_type = InstType.VUNDEF G.func( - InstInfo.get(args, decorator, inst_type), + InstInfo.get( + args, decorator, inst_type, required_ext=required_ext_list), name="{OP}_{TYPE}{SEW}m{LMUL}".format_map(args) + decorator.func_suffix, return_type=type_helper.v) @@ -72,6 +83,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): LMUL=lmul_list, NF=nf_list): type_helper = TypeHelper(**args) + inst_type = InstType.UNKNOWN if args["OP"] not in ["vundefined"]: break if args["TYPE"] == "float" and args["SEW"] == 8: @@ -79,7 +91,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): if args["OP"] == "vundefined": inst_type = InstType.VUNDEF G.func( - InstInfo.get(args, decorator, inst_type), + InstInfo.get( + args, decorator, inst_type, required_ext=required_ext_list), name="{OP}_{TYPE}{SEW}m{LMUL}x{NF}".format_map(args) + decorator.func_suffix, return_type=type_helper.tuple_v) @@ -93,6 +106,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): SEW=sew_list, LMUL=lmul_list, DST_LMUL=lmul_list): + assert args["TYPE"] is not None op = args["OP"] src_lmul = args["LMUL"] dst_lmul = args["DST_LMUL"] @@ -105,8 +119,12 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): if get_float_lmul(src_lmul) >= get_float_lmul(dst_lmul): continue type_helper = TypeHelper(**args) - inst_info = InstInfo.get(args, decorator, inst_type) - args["TYPE1"] = args["TYPE"][0] + inst_info = InstInfo.get( + args, decorator, inst_type, required_ext=required_ext_list) + if args["TYPE"] == "bfloat": + args["TYPE1"] = args["TYPE"][0:2] + else: + args["TYPE1"] = args["TYPE"][0] func_name = "{OP}_{TYPE1}{SEW}m{LMUL}_{TYPE1}{SEW}m{DST_LMUL}".format_map( args) @@ -134,7 +152,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): DST_LMUL=lmul_list): type_helper = TypeHelper(**args) - inst_info = InstInfo.get(args, decorator, InstType.VCREATE) + inst_info = InstInfo.get( + args, decorator, InstType.VCREATE, required_ext=required_ext_list) func_name = "{OP}_v_{TYPE}{SEW}m{LMUL}_{TYPE}{SEW}m{DST_LMUL}".format_map( args) @@ -170,6 +189,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): SEW=sew_list, LMUL=lmul_list, NF=nf_list): + assert args["NF"] is not None type_helper = TypeHelper(**args) # This intrinsic appears after v0.12 @@ -182,7 +202,9 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): args_for_vcreate[arg_name] = type_helper.v G.func( - InstInfo.get(args, decorator, InstType.VCREATE), + InstInfo.get( + args, decorator, InstType.VCREATE, + required_ext=required_ext_list), name="{OP}_v_{TYPE}{SEW}m{LMUL}x{NF}".format_map(args), return_type=type_helper.tuple_v, **args_for_vcreate) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/permute_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/permute_template.py index 
ad79af2da..4effaa4f3 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/permute_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/permute_template.py @@ -26,9 +26,17 @@ from enums import InstType -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. + G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) @@ -57,13 +65,16 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): if op == "mv": if decorator.func_suffix == "": G.func( - InstInfo.get(args, decorator, sv_inst_type), + InstInfo.get( + args, decorator, sv_inst_type, + required_ext=required_ext_list), name="{OP}_{S_TYPE}_s_{TYPE}{SEW}m{LMUL}_{TYPE}{SEW}".format_map( args), return_type=type_helper.s, vs1=type_helper.v) G.func( - InstInfo.get(args, decorator, vs_inst_type), + InstInfo.get( + args, decorator, vs_inst_type, required_ext=required_ext_list), name="{OP}_s_{S_TYPE}_{TYPE}{SEW}m{LMUL}".format_map(args) + decorator.func_suffix, return_type=type_helper.v, @@ -72,7 +83,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): vl=type_helper.size_t) elif op in ["slide1up", "slide1down"]: G.func( - InstInfo.get(args, decorator, vvs_inst_type), + InstInfo.get( + args, decorator, vvs_inst_type, required_ext=required_ext_list), name="{OP}_v{S_TYPE}_{TYPE}{SEW}m{LMUL}".format_map(args) + decorator.func_suffix, return_type=type_helper.v, @@ -83,7 +95,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): vl=type_helper.size_t) elif op == "slideup": G.func( - InstInfo.get(args, decorator, InstType.VVX), + InstInfo.get( + args, decorator, InstType.VVX, required_ext=required_ext_list), name="{OP}_v{S_TYPE}_{TYPE}{SEW}m{LMUL}".format_map(args) + decorator.func_suffix, return_type=type_helper.v, @@ -94,7 +107,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): vl=type_helper.size_t) elif op == "slidedown": G.func( - InstInfo.get(args, decorator, InstType.VVX), + InstInfo.get( + args, decorator, InstType.VVX, required_ext=required_ext_list), name="{OP}_v{S_TYPE}_{TYPE}{SEW}m{LMUL}".format_map(args) + decorator.func_suffix, return_type=type_helper.v, @@ -105,7 +119,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): vl=type_helper.size_t) elif op == "compress": G.func( - InstInfo.get(args, decorator, InstType.VVV), + InstInfo.get( + args, decorator, InstType.VVV, required_ext=required_ext_list), name="{OP}_vm_{TYPE}{SEW}m{LMUL}".format_map(args) + decorator.func_suffix, return_type=type_helper.v, diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/reduction_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/reduction_template.py index 086fc425c..ed79bb35f 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/reduction_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/reduction_template.py @@ -27,13 +27,22 @@ from enums import ExtraAttr -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. 
+ G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) for args in prod(OP=op_list, TYPE=type_list, SEW=sew_list, LMUL=lmul_list): + assert args["OP"] is not None data_type = args["TYPE"] op = args["OP"] sew = args["SEW"] @@ -63,7 +72,11 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): args["OP"] = "v" + args["OP"] inst_info = InstInfo.get( - args, decorator, inst_type, extra_attr=ExtraAttr.REDUCE) + args, + decorator, + inst_type, + extra_attr=ExtraAttr.REDUCE, + required_ext=required_ext_list) if (data_type == "float" and op in ["redosum","redusum","redmax","redmin","wredosum","wredusum"])\ or ("int" in data_type): diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/reint_op_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/reint_op_template.py index 1f67b5a7a..739102255 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/reint_op_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/reint_op_template.py @@ -27,11 +27,17 @@ from generator import CompatibleHeaderGenerator -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name, unused-argument # FIXME: Renaming 'G' to 'g' all in once later. - # FIXME: Argument 'type_list' is unused but required for interface - # consistency. We can prune it in the future. + G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) @@ -39,13 +45,20 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): G.write("// Reinterpret between different type under the same SEW/LMUL\n") # Variable in list means # [dst type, dst short type, src type, src short type] - convert_set = [["float", "f", "int", "i"], ["float", "f", "uint", "u"], - ["uint", "u", "int", "i"], ["int", "i", "uint", "u"], - ["int", "i", "float", "f"], ["uint", "u", "float", "f"]] + if type_list == "bfloat16": + convert_set = [["bfloat", "bf", "int", + "i"], ["bfloat", "bf", "uint", "u"], + ["int", "i", "bfloat", "bf"], + ["uint", "u", "bfloat", "bf"]] + else: + convert_set = [["float", "f", "int", "i"], ["float", "f", "uint", "u"], + ["uint", "u", "int", "i"], ["int", "i", "uint", "u"], + ["int", "i", "float", "f"], ["uint", "u", "float", "f"]] for args in prod( OP=op_list, SEW=sew_list, TYPES=convert_set, LMUL=lmul_list): sew = args["SEW"] + assert args["TYPES"] is not None type_helper = TypeHelper(**args) args["TYPES0"] = args["TYPES"][0] @@ -67,12 +80,17 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): "{OP}_v_{TYPES3}{SEW}m{LMUL}_{TYPES1}{SEW}m{LMUL}".format_map(args) src_type = "v{TYPES2}{SEW}m{LMUL}_t".format_map(args) G.func( - InstInfo.get(args, decorator, InstType.REINT), + InstInfo.get( + args, decorator, InstType.REINT, required_ext=required_ext_list), name=func_name + decorator.func_suffix, return_type=rt, **decorator.mask_args(type_helper.m, rt), src=src_type) + # Bfloat16 reinterpretations do not have variants below + if type_list == "bfloat16": + continue + G.write("// Reinterpret between different SEW under the same LMUL\n") # Variable in list means # [dst type, dst short type, src type, src short type] @@ -103,7 +121,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): 
"{OP}_v_{TYPES3}{SEW}m{LMUL}_{TYPES1}{DST_SEW}m{LMUL}".format_map(args) src_type = "v{TYPES2}{SEW}m{LMUL}_t".format_map(args) G.func( - InstInfo.get(args, decorator, InstType.REINT), + InstInfo.get( + args, decorator, InstType.REINT, required_ext=required_ext_list), name=func_name + decorator.func_suffix, return_type=rt, **decorator.mask_args(type_helper.m, rt), @@ -120,6 +139,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): convert_set = [["int", "i"], ["uint", "u"]] for args in prod( OP=op_list, SEW=sew_list, TYPES=convert_set, LMUL=lmul_list): + assert args["TYPES"] is not None type_helper = TypeHelper(**args) args["TYPES0"] = args["TYPES"][0] @@ -133,7 +153,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): func_name =\ "{OP}_v_{TYPES1}{SEW}m1_b{MLEN}".format_map(args) G.func( - InstInfo.get(args, decorator, InstType.REINT), + InstInfo.get( + args, decorator, InstType.REINT, required_ext=required_ext_list), name=func_name + decorator.func_suffix, return_type=mask_type, src=int_type) @@ -141,7 +162,8 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): func_name =\ "{OP}_v_b{MLEN}_{TYPES1}{SEW}m1".format_map(args) G.func( - InstInfo.get(args, decorator, InstType.REINT), + InstInfo.get( + args, decorator, InstType.REINT, required_ext=required_ext_list), name=func_name + decorator.func_suffix, return_type=int_type, src=mask_type) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/seg_load_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/seg_load_template.py index 52691ea81..3cfbcee20 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/seg_load_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/seg_load_template.py @@ -32,9 +32,17 @@ from generator import CompatibleHeaderGenerator -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. 
+ G.emit_function_group_description(description) G.inst_group_prologue() nf_list = range(2, 9) for decorator in decorator_list: @@ -47,6 +55,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): EEW=sew_list, LMUL=lmul_list, NF=nf_list): + assert args["OP"] is not None op = args["OP"] nf = str(args["NF"]) sew = args["SEW"] @@ -77,7 +86,12 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): else: args["OP"] = op + nf + "e" + str(eew) - inst_info = InstInfo.get(args, decorator, inst_type, MemType.LOAD) + inst_info = InstInfo.get( + args, + decorator, + inst_type, + MemType.LOAD, + required_ext=required_ext_list) # Legacy non-tuple-type variant for the compatible header if isinstance(G, CompatibleHeaderGenerator): G.func( diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/seg_store_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/seg_store_template.py index 290ee4fac..d52bc49a6 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/seg_store_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/seg_store_template.py @@ -32,9 +32,17 @@ from generator import CompatibleHeaderGenerator -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. + G.emit_function_group_description(description) G.inst_group_prologue() nf_list = range(2, 9) for decorator in decorator_list: @@ -47,6 +55,7 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): EEW=sew_list, LMUL=lmul_list, NF=nf_list): + assert args["OP"] is not None op = args["OP"] nf = str(args["NF"]) sew = args["SEW"] @@ -74,7 +83,12 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): else: args["OP"] = op + nf + "e" + str(eew) - inst_info = InstInfo.get(args, decorator, inst_type, MemType.STORE) + inst_info = InstInfo.get( + args, + decorator, + inst_type, + MemType.STORE, + required_ext=required_ext_list) # Legacy non-tuple-type variant for the compatible header if isinstance(G, CompatibleHeaderGenerator): G.func( diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/setvl_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/setvl_template.py index f7f7ad9ac..9a397d3ca 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/setvl_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/setvl_template.py @@ -25,23 +25,33 @@ from enums import InstType -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name, unused-argument # FIXME: Renaming 'G' to 'g' all in once later. # FIXME: Argument 'type_list', 'decorator_list' is unused but required for # interface consistency. We can prune it in the future. 
+ G.emit_function_group_description(description) G.inst_group_prologue() for args in prod(OP=op_list, SEW=sew_list, LMUL=lmul_list): type_helper = TypeHelper(**args) if args["OP"] == "vsetvlmax": G.func( - InstInfo.get(args, None, InstType.SETVLMAX), + InstInfo.get( + args, None, InstType.SETVLMAX, required_ext=required_ext_list), name="{OP}_e{SEW}m{LMUL}".format_map(args), return_type=type_helper.size_t) else: #vsetvl G.func( - InstInfo.get(args, None, InstType.SETVL), + InstInfo.get( + args, None, InstType.SETVL, required_ext=required_ext_list), name="{OP}_e{SEW}m{LMUL}".format_map(args), return_type=type_helper.size_t, avl=type_helper.size_t) diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/store_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/store_template.py index 98476e6ba..6e62e52b0 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/store_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/store_template.py @@ -28,14 +28,23 @@ from enums import MemType -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. + G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) for args in prod( OP=op_list, TYPE=type_list, SEW=sew_list, EEW=sew_list, LMUL=lmul_list): + assert args["OP"] is not None op = args["OP"] sew = args["SEW"] eew = args["EEW"] @@ -59,7 +68,12 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): if op not in ["vsoxei", "vsuxei"] and sew != eew: continue - inst_info = InstInfo.get(args, decorator, inst_type, MemType.STORE) + inst_info = InstInfo.get( + args, + decorator, + inst_type, + MemType.STORE, + required_ext=required_ext_list) G.func( inst_info, name="{OP}_v_{TYPE}{SEW}m{LMUL}".format_map(args) + diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/unary_op_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/unary_op_template.py index 1253dbcc2..7f657bbe5 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/unary_op_template.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/unary_op_template.py @@ -28,20 +28,29 @@ import copy -def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): +def render(G, + op_list, + type_list, + sew_list, + lmul_list, + decorator_list, + description, + required_ext_list=None): #pylint: disable=invalid-name # FIXME: Renaming 'G' to 'g' all in once later. 
+ G.emit_function_group_description(description) G.inst_group_prologue() for decorator in decorator_list: decorator.write_text_header(G) for args in prod(OP=op_list, TYPE=type_list, SEW=sew_list, LMUL=lmul_list): + assert args["OP"] is not None data_type = args["TYPE"] op = args["OP"] if op in ["zext", "sext"]: break - if data_type == "float": + if data_type in ["float"]: args["S_TYPE"] = "f" args["OP"] = "f" + args["OP"] inst_type_vvsm = InstType.VVFM @@ -61,24 +70,40 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): extra_attr = ExtraAttr.NO_ATTR inst_info_vv = InstInfo.get( - args, decorator, InstType.VV, extra_attr=extra_attr) + args, + decorator, + InstType.VV, + extra_attr=extra_attr, + required_ext=required_ext_list) inst_info_vs = InstInfo.get( - args, decorator, inst_type_vs, extra_attr=extra_attr) + args, + decorator, + inst_type_vs, + extra_attr=extra_attr, + required_ext=required_ext_list) inst_info_vvsm = InstInfo.get( - args, decorator, inst_type_vvsm, extra_attr=extra_attr) + args, + decorator, + inst_type_vvsm, + extra_attr=extra_attr, + required_ext=required_ext_list) # Special rule for vfmv_v_v, we don"t have vfmv.v.v but vmv.v.v can used # for float type, accrdoing current naming scheming it # should be vmv_v_v, same for vmerge.vvm. vv_args = args - if data_type == "float" and op in ["mv", "merge"]: + if data_type in ["float", "bfloat"] and op in ["mv", "merge"]: vv_args = copy.deepcopy(args) vv_args["OP"] = "v" + op if op == "merge": G.func( InstInfo.get( - vv_args, decorator, InstType.VVVM, extra_attr=extra_attr), + vv_args, + decorator, + InstType.VVVM, + extra_attr=extra_attr, + required_ext=required_ext_list), name="{OP}_vvm_{TYPE}{SEW}m{LMUL}".format_map(vv_args) + decorator.func_suffix, return_type=type_helper.v, @@ -87,6 +112,10 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): vs1=type_helper.v, v0=type_helper.m, vl=type_helper.size_t) + + if data_type == "bfloat": + continue + G.func( inst_info_vvsm, name="{OP}_v{S_TYPE}m_{TYPE}{SEW}m{LMUL}".format_map(args) + @@ -99,13 +128,17 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): vl=type_helper.size_t) elif op == "mv": G.func( - InstInfo.get(vv_args, decorator, InstType.VV), + InstInfo.get( + vv_args, decorator, InstType.VV, + required_ext=required_ext_list), name="{OP}_v_v_{TYPE}{SEW}m{LMUL}".format_map(vv_args) + decorator.func_suffix, return_type=type_helper.v, **decorator.tu_dest_args(type_helper.v), vs1=type_helper.v, vl=type_helper.size_t) + if data_type == "bfloat": + continue G.func( inst_info_vs, name="{OP}_v_{S_TYPE}_{TYPE}{SEW}m{LMUL}".format_map(args) + @@ -190,7 +223,11 @@ def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): continue inst_info_v = InstInfo.get( - args, decorator, inst_type, extra_attr=ExtraAttr.INT_EXTENSION) + args, + decorator, + inst_type, + extra_attr=ExtraAttr.INT_EXTENSION, + required_ext=required_ext_list) G.func( inst_info_v, diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py new file mode 100644 index 000000000..a655096ee --- /dev/null +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py @@ -0,0 +1,240 @@ +""" +Template for rendering vector crypto intrinsics. +Current version is for v20230531. 
+https://github.com/riscv/riscv-crypto/blob/v20230531/doc/vector/riscv-crypto-spec-vector.adoc
+"""
+
+from utils import prod
+from utils import TypeHelper
+from enums import InstInfo
+from enums import InstType
+from enums import ExtraAttr
+
+operand_mnemonic_dict = {}
+# Zvbb: Vector Bit-manipulation used in Cryptography
+operand_mnemonic_dict["vandn"] = ["vv", "vx"]
+operand_mnemonic_dict["vbrev"] = ["v"]
+operand_mnemonic_dict["vbrev8"] = ["v"]
+operand_mnemonic_dict["vrev8"] = ["v"]
+operand_mnemonic_dict["vclz"] = ["v"]
+operand_mnemonic_dict["vctz"] = ["v"]
+operand_mnemonic_dict["vcpop"] = ["v"]
+operand_mnemonic_dict["vrol"] = ["vv", "vx"]
+operand_mnemonic_dict["vror"] = ["vv", "vx"]  # omitting the `vi` variant
+operand_mnemonic_dict["vwsll"] = ["vv", "vx"]  # omitting the `vi` variant
+# Zvbc: Vector Carryless Multiplication
+operand_mnemonic_dict["vclmul"] = ["vv", "vx"]
+operand_mnemonic_dict["vclmulh"] = ["vv", "vx"]
+# Zvkg: Vector GCM/GMAC
+operand_mnemonic_dict["vghsh"] = ["vv"]
+operand_mnemonic_dict["vgmul"] = ["vv"]
+# Zvkned: NIST Suite: Vector AES Block Cipher
+operand_mnemonic_dict["vaesef"] = ["vv", "vs"]
+operand_mnemonic_dict["vaesem"] = ["vv", "vs"]
+operand_mnemonic_dict["vaesdf"] = ["vv", "vs"]
+operand_mnemonic_dict["vaesdm"] = ["vv", "vs"]
+operand_mnemonic_dict["vaeskf1"] = ["vi"]
+operand_mnemonic_dict["vaeskf2"] = ["vi"]
+operand_mnemonic_dict["vaesz"] = ["vs"]
+# Zvknh[ab]: NIST Suite: Vector SHA-2 Secure Hash
+operand_mnemonic_dict["vsha2ms"] = ["vv"]
+operand_mnemonic_dict["vsha2ch"] = ["vv"]
+operand_mnemonic_dict["vsha2cl"] = ["vv"]
+# Zvksed: ShangMi Suite: SM4 Block Cipher
+operand_mnemonic_dict["vsm4k"] = ["vi"]
+operand_mnemonic_dict["vsm4r"] = ["vv", "vs"]
+# Zvksh: ShangMi Suite: SM3 Secure Hash
+operand_mnemonic_dict["vsm3me"] = ["vv"]
+operand_mnemonic_dict["vsm3c"] = ["vi"]
+
+
+def has_vd_input(name):
+  has_vd_input_inst_set = {
+      "vghsh", "vgmul", "vaesef", "vaesem", "vaesdf", "vaesdm", "vaesz",
+      "vsha2ms", "vsha2ch", "vsha2cl", "vsm4r", "vsm3c", "vaeskf2"
+  }
+
+  return name in has_vd_input_inst_set
+
+
+def has_vs1_input(name):
+  has_vs1_input_inst_set = {
+      "vandn", "vrol", "vror", "vwsll", "vclmul", "vclmulh", "vghsh", "vsha2ms",
+      "vsha2ch", "vsha2cl", "vsm3me"
+  }
+
+  return name in has_vs1_input_inst_set
+
+
+def has_rs1_input(name):
+  has_rs1_input_inst_set = {
+      "vandn", "vrol", "vror", "vwsll", "vclmul", "vclmulh"
+  }
+
+  return name in has_rs1_input_inst_set
+
+
+def render(G,
+           op_list,
+           type_list,
+           sew_list,
+           lmul_list,
+           decorator_list,
+           description,
+           required_ext_list=None):
+  #pylint: disable=invalid-name
+  # FIXME: Renaming 'G' to 'g' all in once later.
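+  # Dispatch summary: each mnemonic listed in operand_mnemonic_dict picks
+  # the InstType used for the InstInfo lookup below. "vv"/"vs" map to
+  # InstType.VV (InstType.WVV for the widening vwsll), "vx" maps to
+  # InstType.VX (InstType.WVX for vwsll), "vi" to InstType.VI, and "v" to
+  # InstType.V. The has_vd_input/has_vs1_input/has_rs1_input sets above
+  # then decide whether vd, vs1, or rs1 is emitted as an input operand of
+  # the generated prototype.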
+ G.emit_function_group_description(description) + G.inst_group_prologue() + + for decorator in decorator_list: + decorator.write_text_header(G) + for args in prod(OP=op_list, TYPE=type_list, SEW=sew_list, LMUL=lmul_list): + assert args["OP"] is not None + op = args["OP"] + for operand_mnemonic in operand_mnemonic_dict[op]: + if operand_mnemonic in ("vv", "vs"): + if op == "vwsll": + inst_info = InstInfo.get( + args, + decorator, + InstType.WVV, + ExtraAttr.NO_ATTR, + required_ext=required_ext_list) + else: + inst_info = InstInfo.get( + args, + decorator, + InstType.VV, + ExtraAttr.NO_ATTR, + required_ext=required_ext_list) + elif operand_mnemonic == "vx": + if op == "vwsll": + inst_info = InstInfo.get( + args, + decorator, + InstType.WVX, + ExtraAttr.NO_ATTR, + required_ext=required_ext_list) + else: + inst_info = InstInfo.get( + args, + decorator, + InstType.VX, + ExtraAttr.NO_ATTR, + required_ext=required_ext_list) + elif operand_mnemonic == "vi": + inst_info = InstInfo.get( + args, + decorator, + InstType.VI, + ExtraAttr.NO_ATTR, + required_ext=required_ext_list) + elif operand_mnemonic == "v": + inst_info = InstInfo.get( + args, + decorator, + InstType.V, + ExtraAttr.NO_ATTR, + required_ext=required_ext_list) + else: + assert False, "Unreachable, unrecognized mnemonic" + + args["MNEMONIC"] = operand_mnemonic + type_helper = TypeHelper(**args) + kwargs = {} + if op == "vwsll": + kwargs["return_type"] = type_helper.wv + else: + kwargs["return_type"] = type_helper.v + if op == "vwsll": + kwargs = { + **kwargs, + **decorator.mask_args(type_helper.m, type_helper.wv) + } + else: + kwargs = { + **kwargs, + **decorator.mask_args(type_helper.m, type_helper.v) + } + # If vd is already in the input parameter, we don't need to emit another + # parameter when tail policy is TU. + if has_vd_input(op): + kwargs["vd"] = type_helper.v + else: + if op == "vwsll": + kwargs = {**kwargs, **decorator.tu_dest_args(type_helper.wv)} + else: + kwargs = {**kwargs, **decorator.tu_dest_args(type_helper.v)} + + kwargs["vs2"] = type_helper.v + + if operand_mnemonic == "vv" and has_vs1_input(op): + kwargs["vs1"] = type_helper.v + if operand_mnemonic == "vx" and has_rs1_input(op): + if op in ["vwsll", "vrol", "vror"]: + kwargs["rs1"] = type_helper.size_t + else: + kwargs["rs1"] = type_helper.s + if "vi" in operand_mnemonic_dict[op]: + kwargs["uimm"] = type_helper.size_t + + kwargs["vl"] = type_helper.size_t + + lmul_num = 2**(lmul_list.index(args["LMUL"]) - 3) + if int(args["SEW"] / lmul_num) == 64: + inst_info.add_required_ext("zve64x") + else: + inst_info.add_required_ext("zve32x") + # Add Zvl constraint + # If zvkg, zvkned, zvknha, zvknhb, zvksed, zvksh in required_ext_list, + # then add Zvl constraint by checking if LMUL * VLEN >= EGW + if any(ext in inst_info.get_required_exts() for ext in + ["zvkg", "zvkned", "zvknha", "zvknhb", "zvksed", "zvksh"]): + # EGW = EGS * EEW(SEW) + # For SM3 instruction group (Zvksh), EGS = 8, otherwise EGS = 4 + if op in ["vsm3me", "vsm3c"]: + EGW = int(8 * args["SEW"]) + else: + EGW = int(4 * args["SEW"]) + required_VLEN = int(EGW / lmul_num) + if required_VLEN >= 32: + inst_info.add_required_ext(f"zvl{int(EGW / lmul_num)}b") + # If SEW == 64, zvknhb is required. 
+ # Zvknhb also requires zve64x + # Note that zvknhb is mutually exclusive with zvknha + if op in ["vsha2ms", "vsha2ch", "vsha2cl"] and args["SEW"] == 64: + inst_info.remove_required_ext("zvknha") + inst_info.add_required_ext("zvknhb") + inst_info.add_required_ext("zve64x") + + if operand_mnemonic == "vs": + starting_from_lmul_index = lmul_list.index(args["LMUL"]) + # print(starting_from_lmul_index) + for i in range(starting_from_lmul_index, len(lmul_list)): + if args["LMUL"] == 8: + continue + + kwargs["return_type"] =\ + f"v{args['TYPE']}{args['SEW']}m{lmul_list[i]}_t" + kwargs["vd"] = f"v{args['TYPE']}{args['SEW']}m{lmul_list[i]}_t" + kwargs["vs2"] = f"v{args['TYPE']}{args['SEW']}m{args['LMUL']}_t" + func_name = "{OP}_{MNEMONIC}_".format_map(args) +\ + f"{args['TYPE']}{args['SEW']}m{args['LMUL']}_" +\ + f"{args['TYPE']}{args['SEW']}m{lmul_list[i]}" + G.func(inst_info, name=func_name + decorator.func_suffix, **kwargs) + else: + if op == "vwsll": + G.func( + inst_info, + name="{OP}_{MNEMONIC}_{TYPE}{WSEW}m{WLMUL}".format_map(args) + + decorator.func_suffix, + **kwargs) + else: + G.func( + inst_info, + name="{OP}_{MNEMONIC}_{TYPE}{SEW}m{LMUL}".format_map(args) + + decorator.func_suffix, + **kwargs) + + G.inst_group_epilogue() diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/testing-report b/rvv-intrinsic-generator/rvv_intrinsic_gen/testing-report index 14d314f92..73cfd7f2e 100755 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/testing-report +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/testing-report @@ -27,8 +27,10 @@ from junitparser import JUnitXml, TestSuite, TestCase, Skipped, Error, Failure sys.path = [os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))] + sys.path +import bfloat16_inst import generator import inst +import vector_crypto_inst class bcolors: HEADER = '\033[95m' @@ -88,8 +90,8 @@ def api_testing_report(stats, opts): # passed if the number of warning equal to line number with open(log, 'r') as fp: last_line = fp.readlines()[-1] - if last_line.count('error') == 0: - if last_line.count('warning') != 0 and opts.warning_as_error: + if opts.warning_as_error: + if 'error' not in last_line and 'warning' in last_line: stats[grp][subgrp].failed_list.append(testname) result.failed_list.append(testname) test_case.result = [Error("Treat warning as error", "warning")] @@ -287,6 +289,8 @@ def parse_args(args): if __name__ == "__main__": g = generator.Grouper() inst.gen(g) + bfloat16_inst.gen(g) + vector_crypto_inst.gen(g) stats = dict() for grp, subgrps in g.groups.items(): diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/utils.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/utils.py index 190b0b426..6433eff12 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/utils.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/utils.py @@ -139,6 +139,9 @@ def s(self): return "double" else: assert False, "Unhandled SEW under float type" + if self.args["TYPE"] == "bfloat": + assert self.args["SEW"] == 16, "BFloat16 only, no other SEW allowed" + return "__bf16" return "{TYPE}{SEW}_t".format_map(self.args) @property diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py new file mode 100644 index 000000000..338bc0dbf --- /dev/null +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py @@ -0,0 +1,240 @@ +""" +Declares the vector crypto intrinsics through the vector crypto template. 
+""" + +from intrinsic_decorator import IntrinsicDecorators +from templates import vector_crypto_template +from constants import LMULS, WLMULS, SEWS, WSEWS, UITYPE + + +def gen(g): + decorators = IntrinsicDecorators(g.has_tail_policy) + + g.start_group("Zvbb - Vector Bit-manipulation used in Cryptography") + + g.function_group( + vector_crypto_template, + "Vector Bit-manipulation used in Cryptography - Bitwise And-Not", + "", # FIXME: We probably have a separate document for vector-crypto + ["vandn"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy, + required_ext_list=["zvbb"]) + + g.function_group( + vector_crypto_template, + "Vector Basic Bit-manipulation - Reverse", + "", # FIXME: We probably have a separate document for vector-crypto + ["vbrev", "vbrev8", "vrev8"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy, + required_ext_list=["zvbb"]) + + g.function_group( + vector_crypto_template, + "Vector Basic Bit-manipulation - Count Bits", + "", # FIXME: We probably have a separate document for vector-crypto + ["vclz", "vctz"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy, + required_ext_list=["zvbb"]) + + g.function_group( + vector_crypto_template, + "Vector Basic Bit-manipulation - Vector Population Count", + "", # FIXME: We probably have a separate document for vector-crypto + ["vcpop"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy, + required_ext_list=["zvbb"]) + + g.function_group( + vector_crypto_template, + "Vector Bit-manipulation used in Cryptography - Rotate", + "", # FIXME: We probably have a separate document for vector-crypto + ["vrol", "vror"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy, + required_ext_list=["zvkb"]) + + g.function_group( + vector_crypto_template, + "Vector Basic Bit-manipulation used - Widening Shift", + "", # FIXME: We probably have a separate document for vector-crypto + ["vwsll"], + UITYPE, + WSEWS, + WLMULS, + decorators.has_masking_maskedoff_policy, + required_ext_list=["zvbb"]) + + #################################################################### + + g.start_group("Zvbc - Vector Carryless Multiplication") + + g.function_group( + vector_crypto_template, + "Vector Carryless Multiplication", + "", # FIXME: We probably have a separate document for vector-crypto + ["vclmul", "vclmulh"], + UITYPE, + [64], + LMULS, + decorators.has_masking_maskedoff_policy, + required_ext_list=["zvbc"]) + + #################################################################### + + g.start_group("Zvkg - Vector GCM/GMAC") + + g.function_group( + vector_crypto_template, + "Vector GCM/GMAC", + "", # FIXME: We probably have a separate document for vector-crypto + ["vghsh", "vgmul"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy, + required_ext_list=["zvkg"]) + + #################################################################### + + g.start_group("Zvkned - NIST Suite: Vector AES Block Cipher") + + g.function_group( + vector_crypto_template, + "Vector AES Encryption", + "", # FIXME: We probably have a separate document for vector-crypto + ["vaesef", "vaesem"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy, + required_ext_list=["zvkned"]) + + g.function_group( + vector_crypto_template, + "Vector AES Decryption", + "", # FIXME: We probably have a separate document for vector-crypto + ["vaesdf", "vaesdm"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy, + required_ext_list=["zvkned"]) + + g.function_group( + 
+      vector_crypto_template,
+      "Vector AES-128 Forward KeySchedule generation",
+      "",  # FIXME: We probably have a separate document for vector-crypto
+      ["vaeskf1", "vaeskf2"],
+      UITYPE,
+      [32],
+      LMULS,
+      decorators.has_no_masking_policy,
+      required_ext_list=["zvkned"])
+
+  g.function_group(
+      vector_crypto_template,
+      "Vector AES round zero",
+      "",  # FIXME: We probably have a separate document for vector-crypto
+      ["vaesz"],
+      UITYPE,
+      [32],
+      LMULS,
+      decorators.has_no_masking_policy,
+      required_ext_list=["zvkned"])
+
+  ####################################################################
+
+  g.start_group("Zvknh - NIST Suite: Vector SHA-2 Secure Hash")
+
+  # An extra condition in the template checks whether zvknhb is required:
+  # if SEW=64, then zvknhb is required instead of zvknha.
+  g.function_group(
+      vector_crypto_template,
+      "Vector SHA-2 message schedule",
+      "",  # FIXME: We probably have a separate document for vector-crypto
+      ["vsha2ms"],
+      UITYPE,
+      [32, 64],
+      LMULS,
+      decorators.has_no_masking_policy,
+      required_ext_list=["zvknha"])
+
+  g.function_group(
+      vector_crypto_template,
+      "Vector SHA-2 two rounds of compression",
+      "",  # FIXME: We probably have a separate document for vector-crypto
+      ["vsha2ch", "vsha2cl"],
+      UITYPE,
+      [32, 64],
+      LMULS,
+      decorators.has_no_masking_policy,
+      required_ext_list=["zvknha"])
+
+  ####################################################################
+
+  g.start_group("Zvksed - ShangMi Suite: SM4 Block Cipher")
+
+  g.function_group(
+      vector_crypto_template,
+      "Vector SM4 KeyExpansion",
+      "",  # FIXME: We probably have a separate document for vector-crypto
+      ["vsm4k"],
+      UITYPE,
+      [32],
+      LMULS,
+      decorators.has_no_masking_policy,
+      required_ext_list=["zvksed"])
+
+  g.function_group(
+      vector_crypto_template,
+      "Vector SM4 Rounds",
+      "",  # FIXME: We probably have a separate document for vector-crypto
+      ["vsm4r"],
+      UITYPE,
+      [32],
+      LMULS,
+      decorators.has_no_masking_policy,
+      required_ext_list=["zvksed"])
+
+  ####################################################################
+
+  g.start_group("Zvksh - ShangMi Suite: SM3 Secure Hash")
+
+  g.function_group(
+      vector_crypto_template,
+      "Vector SM3 Message Expansion",
+      "",  # FIXME: We probably have a separate document for vector-crypto
+      ["vsm3me"],
+      UITYPE,
+      [32],
+      LMULS,
+      decorators.has_no_masking_policy,
+      required_ext_list=["zvksh"])
+
+  g.function_group(
+      vector_crypto_template,
+      "Vector SM3 Compression",
+      "",  # FIXME: We probably have a separate document for vector-crypto
+      ["vsm3c"],
+      UITYPE,
+      [32],
+      LMULS,
+      decorators.has_no_masking_policy,
+      required_ext_list=["zvksh"])
+
+
+####################################################################
diff --git a/vector_crypto_notes.adoc b/vector_crypto_notes.adoc
new file mode 100644
index 000000000..e9c60396e
--- /dev/null
+++ b/vector_crypto_notes.adoc
@@ -0,0 +1,33 @@
+= Note for vector crypto intrinsics
+
+== Availability of vector crypto intrinsics
+
+Availability of the vector crypto intrinsics depends on the minimum vector length specified in the architecture via the `Zvl*b` ^0^ sub-extensions. The vector length must be at least one EGW (element group width ^1^) long.
+
+Take the intrinsic for `vaesdf.vs` below as an example. Since the instruction computes on a single element group provided from `vs2`, the `vuint32mf2_t` operand must be at least 128 bits (one EGW) long. A `vuint32mf2_t` occupies half of VLEN, so VLEN must be at least 256 bits; therefore the intrinsic requires `zvl256b` to be available.
+
+```
+vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+```
+
+^0^ https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc#181-zvl-minimum-vector-length-standard-extensions[v-spec 18.1. Zvl*: Minimum Vector Length Standard Extensions]
+
+^1^ https://github.com/riscv/riscv-crypto/blob/master/doc/vector/riscv-crypto-vector-element-groups.adoc[Vector Crypto Specification: Element Groups]
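+
+== Worked example
+
+The following sketch is illustrative and not part of the specification. It assumes a toolchain configured with the `zvkned` and `zvl256b` extensions enabled; the wrapper name `aes128_final_decrypt_round` is hypothetical and only demonstrates the point above, namely that the `vuint32mf2_t` type of `vs2` is what pins the `zvl256b` requirement.
+
+```
+#include <riscv_vector.h>
+
+// Hypothetical wrapper: apply the final AES decryption round to every
+// 128-bit round-state element group in vd, all sharing the single
+// round-key element group provided in vs2 (the ".vs" form of vaesdf).
+// vs2 is a vuint32mf2_t, i.e. half of VLEN; it must hold one full
+// 128-bit element group, hence zvl256b (VLEN >= 256) is required.
+static inline vuint32m4_t
+aes128_final_decrypt_round(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m4(vd, vs2, vl);
+}
+```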